1212 files changed, 14135 insertions, 7176 deletions
diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst index d2b6fda3d67b..ab2fe0eda1d7 100644 --- a/Documentation/admin-guide/pm/intel_pstate.rst +++ b/Documentation/admin-guide/pm/intel_pstate.rst @@ -145,7 +145,7 @@ feature enabled.] In this mode ``intel_pstate`` registers utilization update callbacks with the CPU scheduler in order to run a P-state selection algorithm, either -``powersave`` or ``performance``, depending on the ``scaling_cur_freq`` policy +``powersave`` or ``performance``, depending on the ``scaling_governor`` policy setting in ``sysfs``. The current CPU frequency information to be made available from the ``scaling_cur_freq`` policy attribute in ``sysfs`` is periodically updated by those utilization update callbacks too. diff --git a/Documentation/admin-guide/pm/sleep-states.rst b/Documentation/admin-guide/pm/sleep-states.rst index 1e5c0f00cb2f..dbf5acd49f35 100644 --- a/Documentation/admin-guide/pm/sleep-states.rst +++ b/Documentation/admin-guide/pm/sleep-states.rst @@ -15,7 +15,7 @@ Sleep States That Can Be Supported ================================== Depending on its configuration and the capabilities of the platform it runs on, -the Linux kernel can support up to four system sleep states, includig +the Linux kernel can support up to four system sleep states, including hibernation and up to three variants of system suspend. The sleep states that can be supported by the kernel are listed below. diff --git a/Documentation/bpf/bpf_devel_QA.txt b/Documentation/bpf/bpf_devel_QA.txt index 1a0b704e1a38..da57601153a0 100644 --- a/Documentation/bpf/bpf_devel_QA.txt +++ b/Documentation/bpf/bpf_devel_QA.txt @@ -557,6 +557,14 @@ A: Although LLVM IR generation and optimization try to stay architecture pulls in some header files containing file scope host assembly codes. - You can add "-fno-jump-tables" to work around the switch table issue. - Otherwise, you can use bpf target. + Otherwise, you can use bpf target. Additionally, you _must_ use bpf target + when: + + - Your program uses data structures with pointer or long / unsigned long + types that interface with BPF helpers or context data structures. Access + into these structures is verified by the BPF verifier and may result + in verification failures if the native architecture is not aligned with + the BPF architecture, e.g. 64-bit. An example of this is + BPF_PROG_TYPE_SK_MSG require '-target bpf' Happy BPF hacking! diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst index ff335f8aeb39..92f30006adae 100644 --- a/Documentation/core-api/kernel-api.rst +++ b/Documentation/core-api/kernel-api.rst @@ -136,6 +136,19 @@ Sorting .. kernel-doc:: lib/list_sort.c :export: +Text Searching +-------------- + +.. kernel-doc:: lib/textsearch.c + :doc: ts_intro + +.. kernel-doc:: lib/textsearch.c + :export: + +.. kernel-doc:: include/linux/textsearch.h + :functions: textsearch_find textsearch_next \ + textsearch_get_pattern textsearch_get_pattern_len + UUID/GUID --------- diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt index 4bcd4b7f79f9..3d01948ea061 100644 --- a/Documentation/device-mapper/thin-provisioning.txt +++ b/Documentation/device-mapper/thin-provisioning.txt @@ -264,7 +264,10 @@ i) Constructor data device, but just remove the mapping. read_only: Don't allow any changes to be made to the pool - metadata. + metadata. 
This mode is only available after the + thin-pool has been created and first used in full + read/write mode. It cannot be specified on initial + thin-pool creation. error_if_no_space: Error IOs, instead of queueing, if no space. diff --git a/Documentation/devicetree/bindings/ata/ahci-platform.txt b/Documentation/devicetree/bindings/ata/ahci-platform.txt index f4006d3c9fdf..c760ecb81381 100644 --- a/Documentation/devicetree/bindings/ata/ahci-platform.txt +++ b/Documentation/devicetree/bindings/ata/ahci-platform.txt @@ -30,7 +30,6 @@ compatible: Optional properties: - dma-coherent : Present if dma operations are coherent - clocks : a list of phandle + clock specifier pairs -- resets : a list of phandle + reset specifier pairs - target-supply : regulator for SATA target power - phys : reference to the SATA PHY node - phy-names : must be "sata-phy" diff --git a/Documentation/devicetree/bindings/display/panel/panel-common.txt b/Documentation/devicetree/bindings/display/panel/panel-common.txt index 557fa765adcb..5d2519af4bb5 100644 --- a/Documentation/devicetree/bindings/display/panel/panel-common.txt +++ b/Documentation/devicetree/bindings/display/panel/panel-common.txt @@ -38,7 +38,7 @@ Display Timings require specific display timings. The panel-timing subnode expresses those timings as specified in the timing subnode section of the display timing bindings defined in - Documentation/devicetree/bindings/display/display-timing.txt. + Documentation/devicetree/bindings/display/panel/display-timing.txt. Connectivity diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt index aadfb236d53a..61315eaa7660 100644 --- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt +++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt @@ -26,6 +26,7 @@ Required Properties: - "renesas,dmac-r8a7794" (R-Car E2) - "renesas,dmac-r8a7795" (R-Car H3) - "renesas,dmac-r8a7796" (R-Car M3-W) + - "renesas,dmac-r8a77965" (R-Car M3-N) - "renesas,dmac-r8a77970" (R-Car V3M) - "renesas,dmac-r8a77980" (R-Car V3H) diff --git a/Documentation/devicetree/bindings/input/atmel,maxtouch.txt b/Documentation/devicetree/bindings/input/atmel,maxtouch.txt index 23e3abc3fdef..c88919480d37 100644 --- a/Documentation/devicetree/bindings/input/atmel,maxtouch.txt +++ b/Documentation/devicetree/bindings/input/atmel,maxtouch.txt @@ -4,6 +4,13 @@ Required properties: - compatible: atmel,maxtouch + The following compatibles have been used in various products but are + deprecated: + atmel,qt602240_ts + atmel,atmel_mxt_ts + atmel,atmel_mxt_tp + atmel,mXT224 + - reg: The I2C address of the device - interrupts: The sink for the touchpad's IRQ output diff --git a/Documentation/devicetree/bindings/net/can/rcar_canfd.txt b/Documentation/devicetree/bindings/net/can/rcar_canfd.txt index 93c3a6ae32f9..ac71daa46195 100644 --- a/Documentation/devicetree/bindings/net/can/rcar_canfd.txt +++ b/Documentation/devicetree/bindings/net/can/rcar_canfd.txt @@ -5,7 +5,9 @@ Required properties: - compatible: Must contain one or more of the following: - "renesas,rcar-gen3-canfd" for R-Car Gen3 compatible controller. - "renesas,r8a7795-canfd" for R8A7795 (R-Car H3) compatible controller. - - "renesas,r8a7796-canfd" for R8A7796 (R-Car M3) compatible controller. + - "renesas,r8a7796-canfd" for R8A7796 (R-Car M3-W) compatible controller. + - "renesas,r8a77970-canfd" for R8A77970 (R-Car V3M) compatible controller. 
+ - "renesas,r8a77980-canfd" for R8A77980 (R-Car V3H) compatible controller. When compatible with the generic version, nodes must list the SoC-specific version corresponding to the platform first, followed by the diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt index c306f55d335b..890526dbfc26 100644 --- a/Documentation/devicetree/bindings/net/renesas,ravb.txt +++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt @@ -18,6 +18,7 @@ Required properties: - "renesas,etheravb-r8a7795" for the R8A7795 SoC. - "renesas,etheravb-r8a7796" for the R8A7796 SoC. + - "renesas,etheravb-r8a77965" for the R8A77965 SoC. - "renesas,etheravb-r8a77970" for the R8A77970 SoC. - "renesas,etheravb-r8a77980" for the R8A77980 SoC. - "renesas,etheravb-r8a77995" for the R8A77995 SoC. diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt index ed5eb547afc8..64bc5c2a76da 100644 --- a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt @@ -56,9 +56,9 @@ pins it needs, and how they should be configured, with regard to muxer configuration, drive strength and pullups. If one of these options is not set, its actual value will be unspecified. -This driver supports the generic pin multiplexing and configuration -bindings. For details on each properties, you can refer to -./pinctrl-bindings.txt. +Allwinner A1X Pin Controller supports the generic pin multiplexing and +configuration bindings. For details on each properties, you can refer to + ./pinctrl-bindings.txt. Required sub-node properties: - pins diff --git a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt index 8ff65fa632fd..c06c045126fc 100644 --- a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt +++ b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt @@ -21,7 +21,7 @@ Required properties: - interrupts : identifier to the device interrupt - clocks : a list of phandle + clock-specifier pairs, one for each entry in clock names. -- clocks-names : +- clock-names : * "xtal" for external xtal clock identifier * "pclk" for the bus core clock, either the clk81 clock or the gate clock * "baud" for the source of the baudrate generator, can be either the xtal diff --git a/Documentation/devicetree/bindings/serial/mvebu-uart.txt b/Documentation/devicetree/bindings/serial/mvebu-uart.txt index 2ae2fee7e023..b7e0e32b9ac6 100644 --- a/Documentation/devicetree/bindings/serial/mvebu-uart.txt +++ b/Documentation/devicetree/bindings/serial/mvebu-uart.txt @@ -24,7 +24,7 @@ Required properties: - Must contain two elements for the extended variant of the IP (marvell,armada-3700-uart-ext): "uart-tx" and "uart-rx", respectively the UART TX interrupt and the UART RX interrupt. A - corresponding interrupts-names property must be defined. + corresponding interrupt-names property must be defined. - For backward compatibility reasons, a single element interrupts property is also supported for the standard variant of the IP, containing only the UART sum interrupt. 
This form is deprecated diff --git a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt index ad962f4ec3aa..106808b55b6d 100644 --- a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt +++ b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt @@ -17,6 +17,8 @@ Required properties: - "renesas,scifa-r8a7745" for R8A7745 (RZ/G1E) SCIFA compatible UART. - "renesas,scifb-r8a7745" for R8A7745 (RZ/G1E) SCIFB compatible UART. - "renesas,hscif-r8a7745" for R8A7745 (RZ/G1E) HSCIF compatible UART. + - "renesas,scif-r8a77470" for R8A77470 (RZ/G1C) SCIF compatible UART. + - "renesas,hscif-r8a77470" for R8A77470 (RZ/G1C) HSCIF compatible UART. - "renesas,scif-r8a7778" for R8A7778 (R-Car M1) SCIF compatible UART. - "renesas,scif-r8a7779" for R8A7779 (R-Car H1) SCIF compatible UART. - "renesas,scif-r8a7790" for R8A7790 (R-Car H2) SCIF compatible UART. @@ -41,6 +43,8 @@ Required properties: - "renesas,hscif-r8a7795" for R8A7795 (R-Car H3) HSCIF compatible UART. - "renesas,scif-r8a7796" for R8A7796 (R-Car M3-W) SCIF compatible UART. - "renesas,hscif-r8a7796" for R8A7796 (R-Car M3-W) HSCIF compatible UART. + - "renesas,scif-r8a77965" for R8A77965 (R-Car M3-N) SCIF compatible UART. + - "renesas,hscif-r8a77965" for R8A77965 (R-Car M3-N) HSCIF compatible UART. - "renesas,scif-r8a77970" for R8A77970 (R-Car V3M) SCIF compatible UART. - "renesas,hscif-r8a77970" for R8A77970 (R-Car V3M) HSCIF compatible UART. - "renesas,scif-r8a77980" for R8A77980 (R-Car V3H) SCIF compatible UART. diff --git a/Documentation/devicetree/bindings/thermal/exynos-thermal.txt b/Documentation/devicetree/bindings/thermal/exynos-thermal.txt index 1b596fd38dc4..b957acff57aa 100644 --- a/Documentation/devicetree/bindings/thermal/exynos-thermal.txt +++ b/Documentation/devicetree/bindings/thermal/exynos-thermal.txt @@ -49,19 +49,6 @@ on the SoC (only first trip points defined in DT will be configured): - samsung,exynos5433-tmu: 8 - samsung,exynos7-tmu: 8 -Following properties are mandatory (depending on SoC): -- samsung,tmu_gain: Gain value for internal TMU operation. 
-- samsung,tmu_reference_voltage: Value of TMU IP block's reference voltage -- samsung,tmu_noise_cancel_mode: Mode for noise cancellation -- samsung,tmu_efuse_value: Default level of temperature - it is needed when - in factory fusing produced wrong value -- samsung,tmu_min_efuse_value: Minimum temperature fused value -- samsung,tmu_max_efuse_value: Maximum temperature fused value -- samsung,tmu_first_point_trim: First point trimming value -- samsung,tmu_second_point_trim: Second point trimming value -- samsung,tmu_default_temp_offset: Default temperature offset -- samsung,tmu_cal_type: Callibration type - ** Optional properties: - vtmu-supply: This entry is optional and provides the regulator node supplying @@ -78,7 +65,7 @@ Example 1): clocks = <&clock 383>; clock-names = "tmu_apbif"; vtmu-supply = <&tmu_regulator_node>; - #include "exynos4412-tmu-sensor-conf.dtsi" + #thermal-sensor-cells = <0>; }; Example 2): @@ -89,7 +76,7 @@ Example 2): interrupts = <0 58 0>; clocks = <&clock 21>; clock-names = "tmu_apbif"; - #include "exynos5440-tmu-sensor-conf.dtsi" + #thermal-sensor-cells = <0>; }; Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register") @@ -99,7 +86,7 @@ Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register") interrupts = <0 184 0>; clocks = <&clock 318>, <&clock 318>; clock-names = "tmu_apbif", "tmu_triminfo_apbif"; - #include "exynos4412-tmu-sensor-conf.dtsi" + #thermal-sensor-cells = <0>; }; tmu_cpu3: tmu@1006c000 { @@ -108,7 +95,7 @@ Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register") interrupts = <0 185 0>; clocks = <&clock 318>, <&clock 319>; clock-names = "tmu_apbif", "tmu_triminfo_apbif"; - #include "exynos4412-tmu-sensor-conf.dtsi" + #thermal-sensor-cells = <0>; }; tmu_gpu: tmu@100a0000 { @@ -117,7 +104,7 @@ Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register") interrupts = <0 215 0>; clocks = <&clock 319>, <&clock 318>; clock-names = "tmu_apbif", "tmu_triminfo_apbif"; - #include "exynos4412-tmu-sensor-conf.dtsi" + #thermal-sensor-cells = <0>; }; Note: For multi-instance tmu each instance should have an alias correctly diff --git a/Documentation/devicetree/bindings/thermal/thermal.txt b/Documentation/devicetree/bindings/thermal/thermal.txt index 1719d47a5e2f..cc553f0952c5 100644 --- a/Documentation/devicetree/bindings/thermal/thermal.txt +++ b/Documentation/devicetree/bindings/thermal/thermal.txt @@ -55,8 +55,7 @@ of heat dissipation). For example a fan's cooling states correspond to the different fan speeds possible. Cooling states are referred to by single unsigned integers, where larger numbers mean greater heat dissipation. The precise set of cooling states associated with a device -(as referred to by the cooling-min-level and cooling-max-level -properties) should be defined in a particular device's binding. +should be defined in a particular device's binding. For more examples of cooling devices, refer to the example sections below. Required properties: @@ -69,15 +68,6 @@ Required properties: See Cooling device maps section below for more details on how consumers refer to cooling devices. -Optional properties: -- cooling-min-level: An integer indicating the smallest - Type: unsigned cooling state accepted. Typically 0. - Size: one cell - -- cooling-max-level: An integer indicating the largest - Type: unsigned cooling state accepted. 
- Size: one cell - * Trip points The trip node is a node to describe a point in the temperature domain @@ -226,8 +216,6 @@ cpus { 396000 950000 198000 850000 >; - cooling-min-level = <0>; - cooling-max-level = <3>; #cooling-cells = <2>; /* min followed by max */ }; ... @@ -241,8 +229,6 @@ cpus { */ fan0: fan@48 { ... - cooling-min-level = <0>; - cooling-max-level = <9>; #cooling-cells = <2>; /* min followed by max */ }; }; diff --git a/Documentation/devicetree/bindings/timer/nuvoton,npcm7xx-timer.txt b/Documentation/devicetree/bindings/timer/nuvoton,npcm7xx-timer.txt new file mode 100644 index 000000000000..ea22dfe485be --- /dev/null +++ b/Documentation/devicetree/bindings/timer/nuvoton,npcm7xx-timer.txt @@ -0,0 +1,21 @@ +Nuvoton NPCM7xx timer + +Nuvoton NPCM7xx have three timer modules, each timer module provides five 24-bit +timer counters. + +Required properties: +- compatible : "nuvoton,npcm750-timer" for Poleg NPCM750. +- reg : Offset and length of the register set for the device. +- interrupts : Contain the timer interrupt with flags for + falling edge. +- clocks : phandle of timer reference clock (usually a 25 MHz clock). + +Example: + +timer@f0008000 { + compatible = "nuvoton,npcm750-timer"; + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>; + reg = <0xf0008000 0x50>; + clocks = <&clk NPCM7XX_CLK_TIMER>; +}; + diff --git a/Documentation/devicetree/bindings/timer/nxp,tpm-timer.txt b/Documentation/devicetree/bindings/timer/nxp,tpm-timer.txt index b4aa7ddb5b13..f82087b220f4 100644 --- a/Documentation/devicetree/bindings/timer/nxp,tpm-timer.txt +++ b/Documentation/devicetree/bindings/timer/nxp,tpm-timer.txt @@ -15,7 +15,7 @@ Required properties: - interrupts : Should be the clock event device interrupt. - clocks : The clocks provided by the SoC to drive the timer, must contain an entry for each entry in clock-names. -- clock-names : Must include the following entries: "igp" and "per". +- clock-names : Must include the following entries: "ipg" and "per". Example: tpm5: tpm@40260000 { diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.txt b/Documentation/devicetree/bindings/usb/usb-xhci.txt index c4c00dff4b56..bd1dd316fb23 100644 --- a/Documentation/devicetree/bindings/usb/usb-xhci.txt +++ b/Documentation/devicetree/bindings/usb/usb-xhci.txt @@ -28,7 +28,10 @@ Required properties: - interrupts: one XHCI interrupt should be described here. Optional properties: - - clocks: reference to a clock + - clocks: reference to the clocks + - clock-names: mandatory if there is a second clock, in this case + the name must be "core" for the first clock and "reg" for the + second one - usb2-lpm-disable: indicate if we don't want to enable USB2 HW LPM - usb3-lpm-capable: determines if platform is USB3 LPM capable - quirk-broken-port-ped: set if the controller has broken port disable mechanism diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index b5f978a4cac6..a38d8bfae19c 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -182,6 +182,7 @@ karo Ka-Ro electronics GmbH keithkoep Keith & Koep GmbH keymile Keymile GmbH khadas Khadas +kiebackpeter Kieback & Peter GmbH kinetic Kinetic Technologies kingnovel Kingnovel Technology Co., Ltd. kosagi Sutajio Ko-Usagi PTE Ltd. 
diff --git a/Documentation/devicetree/overlay-notes.txt b/Documentation/devicetree/overlay-notes.txt index a4feb6dde8cd..725fb8d255c1 100644 --- a/Documentation/devicetree/overlay-notes.txt +++ b/Documentation/devicetree/overlay-notes.txt @@ -98,6 +98,14 @@ Finally, if you need to remove all overlays in one-go, just call of_overlay_remove_all() which will remove every single one in the correct order. +In addition, there is the option to register notifiers that get called on +overlay operations. See of_overlay_notifier_register/unregister and +enum of_overlay_notify_action for details. + +Note that a notifier callback is not supposed to store pointers to a device +tree node or its content beyond OF_OVERLAY_POST_REMOVE corresponding to the +respective node it received. + Overlay DTS Format ------------------ diff --git a/Documentation/doc-guide/parse-headers.rst b/Documentation/doc-guide/parse-headers.rst index 96a0423d5dba..82a3e43b6864 100644 --- a/Documentation/doc-guide/parse-headers.rst +++ b/Documentation/doc-guide/parse-headers.rst @@ -177,14 +177,14 @@ BUGS **** -Report bugs to Mauro Carvalho Chehab <mchehab@s-opensource.com> +Report bugs to Mauro Carvalho Chehab <mchehab@kernel.org> COPYRIGHT ********* -Copyright (c) 2016 by Mauro Carvalho Chehab <mchehab@s-opensource.com>. +Copyright (c) 2016 by Mauro Carvalho Chehab <mchehab+samsung@kernel.org>. License GPLv2: GNU GPL version 2 <http://gnu.org/licenses/gpl.html>. diff --git a/Documentation/driver-api/firmware/request_firmware.rst b/Documentation/driver-api/firmware/request_firmware.rst index cf4516dfbf96..d5ec95a7195b 100644 --- a/Documentation/driver-api/firmware/request_firmware.rst +++ b/Documentation/driver-api/firmware/request_firmware.rst @@ -17,17 +17,17 @@ an error is returned. request_firmware ---------------- -.. kernel-doc:: drivers/base/firmware_class.c +.. kernel-doc:: drivers/base/firmware_loader/main.c :functions: request_firmware request_firmware_direct ----------------------- -.. kernel-doc:: drivers/base/firmware_class.c +.. kernel-doc:: drivers/base/firmware_loader/main.c :functions: request_firmware_direct request_firmware_into_buf ------------------------- -.. kernel-doc:: drivers/base/firmware_class.c +.. kernel-doc:: drivers/base/firmware_loader/main.c :functions: request_firmware_into_buf Asynchronous firmware requests @@ -41,7 +41,7 @@ in atomic contexts. request_firmware_nowait ----------------------- -.. kernel-doc:: drivers/base/firmware_class.c +.. kernel-doc:: drivers/base/firmware_loader/main.c :functions: request_firmware_nowait Special optimizations on reboot @@ -50,12 +50,12 @@ Special optimizations on reboot Some devices have an optimization in place to enable the firmware to be retained during system reboot. When such optimizations are used the driver author must ensure the firmware is still available on resume from suspend, -this can be done with firmware_request_cache() insted of requesting for the -firmare to be loaded. +this can be done with firmware_request_cache() instead of requesting for the +firmware to be loaded. firmware_request_cache() ------------------------ -.. kernel-doc:: drivers/base/firmware_class.c +------------------------ +.. 
kernel-doc:: drivers/base/firmware_loader/main.c :functions: firmware_request_cache request firmware API expected driver use diff --git a/Documentation/driver-api/infrastructure.rst b/Documentation/driver-api/infrastructure.rst index 6d9ff316b608..bee1b9a1702f 100644 --- a/Documentation/driver-api/infrastructure.rst +++ b/Documentation/driver-api/infrastructure.rst @@ -28,7 +28,7 @@ Device Drivers Base .. kernel-doc:: drivers/base/node.c :internal: -.. kernel-doc:: drivers/base/firmware_class.c +.. kernel-doc:: drivers/base/firmware_loader/main.c :export: .. kernel-doc:: drivers/base/transport_class.c diff --git a/Documentation/driver-api/usb/typec.rst b/Documentation/driver-api/usb/typec.rst index feb31946490b..48ff58095f11 100644 --- a/Documentation/driver-api/usb/typec.rst +++ b/Documentation/driver-api/usb/typec.rst @@ -210,7 +210,7 @@ If the connector is dual-role capable, there may also be a switch for the data role. USB Type-C Connector Class does not supply separate API for them. The port drivers can use USB Role Class API with those. -Illustration of the muxes behind a connector that supports an alternate mode: +Illustration of the muxes behind a connector that supports an alternate mode:: ------------------------ | Connector | diff --git a/Documentation/i2c/dev-interface b/Documentation/i2c/dev-interface index d04e6e4964ee..fbed645ccd75 100644 --- a/Documentation/i2c/dev-interface +++ b/Documentation/i2c/dev-interface @@ -9,8 +9,8 @@ i2c adapters present on your system at a given time. i2cdetect is part of the i2c-tools package. I2C device files are character device files with major device number 89 -and a minor device number corresponding to the number assigned as -explained above. They should be called "i2c-%d" (i2c-0, i2c-1, ..., +and a minor device number corresponding to the number assigned as +explained above. They should be called "i2c-%d" (i2c-0, i2c-1, ..., i2c-10, ...). All 256 minor device numbers are reserved for i2c. @@ -23,11 +23,6 @@ First, you need to include these two headers: #include <linux/i2c-dev.h> #include <i2c/smbus.h> -(Please note that there are two files named "i2c-dev.h" out there. One is -distributed with the Linux kernel and the other one is included in the -source tree of i2c-tools. They used to be different in content but since 2012 -they're identical. You should use "linux/i2c-dev.h"). - Now, you have to decide which adapter you want to access. You should inspect /sys/class/i2c-dev/ or run "i2cdetect -l" to decide this. Adapter numbers are assigned somewhat dynamically, so you can not @@ -38,7 +33,7 @@ Next thing, open the device file, as follows: int file; int adapter_nr = 2; /* probably dynamically determined */ char filename[20]; - + snprintf(filename, 19, "/dev/i2c-%d", adapter_nr); file = open(filename, O_RDWR); if (file < 0) { @@ -72,8 +67,10 @@ the device supports them. Both are illustrated below. /* res contains the read word */ } - /* Using I2C Write, equivalent of - i2c_smbus_write_word_data(file, reg, 0x6543) */ + /* + * Using I2C Write, equivalent of + * i2c_smbus_write_word_data(file, reg, 0x6543) + */ buf[0] = reg; buf[1] = 0x43; buf[2] = 0x65; @@ -140,14 +137,14 @@ ioctl(file, I2C_RDWR, struct i2c_rdwr_ioctl_data *msgset) set in each message, overriding the values set with the above ioctl's. ioctl(file, I2C_SMBUS, struct i2c_smbus_ioctl_data *args) - Not meant to be called directly; instead, use the access functions - below. + If possible, use the provided i2c_smbus_* methods described below instead + of issuing direct ioctls. 
You can do plain i2c transactions by using read(2) and write(2) calls. You do not need to pass the address byte; instead, set it through ioctl I2C_SLAVE before you try to access the device. -You can do SMBus level transactions (see documentation file smbus-protocol +You can do SMBus level transactions (see documentation file smbus-protocol for details) through the following functions: __s32 i2c_smbus_write_quick(int file, __u8 value); __s32 i2c_smbus_read_byte(int file); @@ -158,7 +155,7 @@ for details) through the following functions: __s32 i2c_smbus_write_word_data(int file, __u8 command, __u16 value); __s32 i2c_smbus_process_call(int file, __u8 command, __u16 value); __s32 i2c_smbus_read_block_data(int file, __u8 command, __u8 *values); - __s32 i2c_smbus_write_block_data(int file, __u8 command, __u8 length, + __s32 i2c_smbus_write_block_data(int file, __u8 command, __u8 length, __u8 *values); All these transactions return -1 on failure; you can read errno to see what happened. The 'write' transactions return 0 on success; the @@ -166,10 +163,9 @@ what happened. The 'write' transactions return 0 on success; the returns the number of values read. The block buffers need not be longer than 32 bytes. -The above functions are all inline functions, that resolve to calls to -the i2c_smbus_access function, that on its turn calls a specific ioctl -with the data in a specific format. Read the source code if you -want to know what happens behind the screens. +The above functions are made available by linking against the libi2c library, +which is provided by the i2c-tools project. See: +https://git.kernel.org/pub/scm/utils/i2c-tools/i2c-tools.git/. Implementation details diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt index 84bb74dcae12..7f7413e597f3 100644 --- a/Documentation/ioctl/ioctl-number.txt +++ b/Documentation/ioctl/ioctl-number.txt @@ -217,7 +217,6 @@ Code Seq#(hex) Include File Comments 'd' 02-40 pcmcia/ds.h conflict! 'd' F0-FF linux/digi1.h 'e' all linux/digi1.h conflict! -'e' 00-1F drivers/net/irda/irtty-sir.h conflict! 'f' 00-1F linux/ext2_fs.h conflict! 'f' 00-1F linux/ext3_fs.h conflict! 'f' 00-0F fs/jfs/jfs_dinode.h conflict! @@ -247,7 +246,6 @@ Code Seq#(hex) Include File Comments 'm' all linux/synclink.h conflict! 'm' 00-19 drivers/message/fusion/mptctl.h conflict! 'm' 00 drivers/scsi/megaraid/megaraid_ioctl.h conflict! -'m' 00-1F net/irda/irmod.h conflict! 'n' 00-7F linux/ncp_fs.h and fs/ncpfs/ioctl.c 'n' 80-8F uapi/linux/nilfs2_api.h NILFS2 'n' E0-FF linux/matroxfb.h matroxfb diff --git a/Documentation/livepatch/shadow-vars.txt b/Documentation/livepatch/shadow-vars.txt index 89c66634d600..ecc09a7be5dd 100644 --- a/Documentation/livepatch/shadow-vars.txt +++ b/Documentation/livepatch/shadow-vars.txt @@ -34,9 +34,13 @@ meta-data and shadow-data: - data[] - storage for shadow data It is important to note that the klp_shadow_alloc() and -klp_shadow_get_or_alloc() calls, described below, store a *copy* of the -data that the functions are provided. Callers should provide whatever -mutual exclusion is required of the shadow data. +klp_shadow_get_or_alloc() are zeroing the variable by default. +They also allow to call a custom constructor function when a non-zero +value is needed. Callers should provide whatever mutual exclusion +is required. + +Note that the constructor is called under klp_shadow_lock spinlock. It allows +to do actions that can be done only once when a new variable is allocated. 
* klp_shadow_get() - retrieve a shadow variable data pointer - search hashtable for <obj, id> pair @@ -47,7 +51,7 @@ mutual exclusion is required of the shadow data. - WARN and return NULL - if <obj, id> doesn't already exist - allocate a new shadow variable - - copy data into the new shadow variable + - initialize the variable using a custom constructor and data when provided - add <obj, id> to the global hashtable * klp_shadow_get_or_alloc() - get existing or alloc a new shadow variable @@ -56,16 +60,20 @@ mutual exclusion is required of the shadow data. - return existing shadow variable - if <obj, id> doesn't already exist - allocate a new shadow variable - - copy data into the new shadow variable + - initialize the variable using a custom constructor and data when provided - add <obj, id> pair to the global hashtable * klp_shadow_free() - detach and free a <obj, id> shadow variable - find and remove a <obj, id> reference from global hashtable - - if found, free shadow variable + - if found + - call destructor function if defined + - free shadow variable * klp_shadow_free_all() - detach and free all <*, id> shadow variables - find and remove any <*, id> references from global hashtable - - if found, free shadow variable + - if found + - call destructor function if defined + - free shadow variable 2. Use cases @@ -107,7 +115,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, sta = kzalloc(sizeof(*sta) + hw->sta_data_size, gfp); /* Attach a corresponding shadow variable, then initialize it */ - ps_lock = klp_shadow_alloc(sta, PS_LOCK, NULL, sizeof(*ps_lock), gfp); + ps_lock = klp_shadow_alloc(sta, PS_LOCK, sizeof(*ps_lock), gfp, + NULL, NULL); if (!ps_lock) goto shadow_fail; spin_lock_init(ps_lock); @@ -131,7 +140,7 @@ variable: void sta_info_free(struct ieee80211_local *local, struct sta_info *sta) { - klp_shadow_free(sta, PS_LOCK); + klp_shadow_free(sta, PS_LOCK, NULL); kfree(sta); ... @@ -148,16 +157,24 @@ shadow variables to parents already in-flight. For commit 1d147bfa6429, a good spot to allocate a shadow spinlock is inside ieee80211_sta_ps_deliver_wakeup(): +int ps_lock_shadow_ctor(void *obj, void *shadow_data, void *ctor_data) +{ + spinlock_t *lock = shadow_data; + + spin_lock_init(lock); + return 0; +} + #define PS_LOCK 1 void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) { - DEFINE_SPINLOCK(ps_lock_fallback); spinlock_t *ps_lock; /* sync with ieee80211_tx_h_unicast_ps_buf */ ps_lock = klp_shadow_get_or_alloc(sta, PS_LOCK, - &ps_lock_fallback, sizeof(ps_lock_fallback), - GFP_ATOMIC); + sizeof(*ps_lock), GFP_ATOMIC, + ps_lock_shadow_ctor, NULL); + if (ps_lock) spin_lock(ps_lock); ... 
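(An illustrative aside, not part of the diff above.) The shadow-variable hunks show klp_shadow_free() gaining a destructor argument alongside the new constructor support. A minimal sketch of how a livepatch might use that third parameter, assuming the callback form void (*dtor)(void *obj, void *shadow_data) and a hypothetical SHADOW_LEAK id and struct object type:

    #include <linux/livepatch.h>
    #include <linux/slab.h>

    struct object;                  /* hypothetical patched object type */

    #define SHADOW_LEAK 2           /* hypothetical shadow variable id */

    static void leak_dtor(void *obj, void *shadow_data)
    {
            /* shadow_data points at the shadow variable's data[] area,
             * assumed here to hold a pointer allocated by the livepatch.
             */
            void *mem = *(void **)shadow_data;

            kfree(mem);
    }

    static void patched_release(struct object *obj)
    {
            /* Runs the destructor (under klp_shadow_lock, as noted above)
             * and then frees the <obj, SHADOW_LEAK> shadow variable.
             */
            klp_shadow_free(obj, SHADOW_LEAK, leak_dtor);
            kfree(obj);
    }

Passing NULL instead of leak_dtor, as in the sta_info_free() hunk above, skips the destructor and only releases the shadow variable itself.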
diff --git a/Documentation/media/uapi/rc/keytable.c.rst b/Documentation/media/uapi/rc/keytable.c.rst index e6ce1e3f5a78..217237f93b37 100644 --- a/Documentation/media/uapi/rc/keytable.c.rst +++ b/Documentation/media/uapi/rc/keytable.c.rst @@ -7,7 +7,7 @@ file: uapi/v4l/keytable.c /* keytable.c - This program allows checking/replacing keys at IR - Copyright (C) 2006-2009 Mauro Carvalho Chehab <mchehab@infradead.org> + Copyright (C) 2006-2009 Mauro Carvalho Chehab <mchehab@kernel.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/Documentation/media/uapi/v4l/v4l2grab.c.rst b/Documentation/media/uapi/v4l/v4l2grab.c.rst index 5aabd0b7b089..f0d0ab6abd41 100644 --- a/Documentation/media/uapi/v4l/v4l2grab.c.rst +++ b/Documentation/media/uapi/v4l/v4l2grab.c.rst @@ -6,7 +6,7 @@ file: media/v4l/v4l2grab.c .. code-block:: c /* V4L2 video picture grabber - Copyright (C) 2009 Mauro Carvalho Chehab <mchehab@infradead.org> + Copyright (C) 2009 Mauro Carvalho Chehab <mchehab@kernel.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt index a4508ec1816b..fd55c7de9991 100644 --- a/Documentation/networking/filter.txt +++ b/Documentation/networking/filter.txt @@ -169,7 +169,7 @@ access to BPF code as well. BPF engine and instruction set ------------------------------ -Under tools/net/ there's a small helper tool called bpf_asm which can +Under tools/bpf/ there's a small helper tool called bpf_asm which can be used to write low-level filters for example scenarios mentioned in the previous section. Asm-like syntax mentioned here has been implemented in bpf_asm and will be used for further explanations (instead of dealing with @@ -359,7 +359,7 @@ $ ./bpf_asm -c foo In particular, as usage with xt_bpf or cls_bpf can result in more complex BPF filters that might not be obvious at first, it's good to test filters before attaching to a live system. For that purpose, there's a small tool called -bpf_dbg under tools/net/ in the kernel source directory. This debugger allows +bpf_dbg under tools/bpf/ in the kernel source directory. This debugger allows for testing BPF filters against given pcap files, single stepping through the BPF code on the pcap's packets and to do BPF machine register dumps. @@ -483,7 +483,7 @@ Example output from dmesg: [ 3389.935851] JIT code: 00000030: 00 e8 28 94 ff e0 83 f8 01 75 07 b8 ff ff 00 00 [ 3389.935852] JIT code: 00000040: eb 02 31 c0 c9 c3 -In the kernel source tree under tools/net/, there's bpf_jit_disasm for +In the kernel source tree under tools/bpf/, there's bpf_jit_disasm for generating disassembly out of the kernel log's hexdump: # ./bpf_jit_disasm diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 5dc1a040a2f1..35ffaa281b26 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -1390,26 +1390,26 @@ mld_qrv - INTEGER Default: 2 (as specified by RFC3810 9.1) Minimum: 1 (as specified by RFC6636 4.5) -max_dst_opts_cnt - INTEGER +max_dst_opts_number - INTEGER Maximum number of non-padding TLVs allowed in a Destination options extension header. If this value is less than zero then unknown options are disallowed and the number of known TLVs allowed is the absolute value of this number. 
Default: 8 -max_hbh_opts_cnt - INTEGER +max_hbh_opts_number - INTEGER Maximum number of non-padding TLVs allowed in a Hop-by-Hop options extension header. If this value is less than zero then unknown options are disallowed and the number of known TLVs allowed is the absolute value of this number. Default: 8 -max dst_opts_len - INTEGER +max_dst_opts_length - INTEGER Maximum length allowed for a Destination options extension header. Default: INT_MAX (unlimited) -max hbh_opts_len - INTEGER +max_hbh_length - INTEGER Maximum length allowed for a Hop-by-Hop options extension header. Default: INT_MAX (unlimited) @@ -2126,18 +2126,3 @@ max_dgram_qlen - INTEGER Default: 10 - -UNDOCUMENTED: - -/proc/sys/net/irda/* - fast_poll_increase FIXME - warn_noreply_time FIXME - discovery_slots FIXME - slot_timeout FIXME - max_baud_rate FIXME - discovery_timeout FIXME - lap_keepalive_time FIXME - max_noreply_time FIXME - max_tx_data_size FIXME - max_tx_window FIXME - min_tx_turn_time FIXME diff --git a/Documentation/power/suspend-and-cpuhotplug.txt b/Documentation/power/suspend-and-cpuhotplug.txt index 31abd04b9572..6f55eb960a6d 100644 --- a/Documentation/power/suspend-and-cpuhotplug.txt +++ b/Documentation/power/suspend-and-cpuhotplug.txt @@ -168,7 +168,7 @@ update on the CPUs, as discussed below: [Please bear in mind that the kernel requests the microcode images from userspace, using the request_firmware() function defined in -drivers/base/firmware_class.c] +drivers/base/firmware_loader/main.c] a. When all the CPUs are identical: diff --git a/Documentation/process/magic-number.rst b/Documentation/process/magic-number.rst index 00cecf1fcba9..633be1043690 100644 --- a/Documentation/process/magic-number.rst +++ b/Documentation/process/magic-number.rst @@ -157,8 +157,5 @@ memory management. See ``include/sound/sndmagic.h`` for complete list of them. M OSS sound drivers have their magic numbers constructed from the soundcard PCI ID - these are not listed here as well. -IrDA subsystem also uses large number of own magic numbers, see -``include/net/irda/irda.h`` for a complete list of them. - HFS is another larger user of magic numbers - you can find them in ``fs/hfs/hfs.h``. diff --git a/Documentation/sphinx/parse-headers.pl b/Documentation/sphinx/parse-headers.pl index a958d8b5e99d..d410f47567e9 100755 --- a/Documentation/sphinx/parse-headers.pl +++ b/Documentation/sphinx/parse-headers.pl @@ -387,11 +387,11 @@ tree for more details. =head1 BUGS -Report bugs to Mauro Carvalho Chehab <mchehab@s-opensource.com> +Report bugs to Mauro Carvalho Chehab <mchehab@kernel.org> =head1 COPYRIGHT -Copyright (c) 2016 by Mauro Carvalho Chehab <mchehab@s-opensource.com>. +Copyright (c) 2016 by Mauro Carvalho Chehab <mchehab+samsung@kernel.org>. License GPLv2: GNU GPL version 2 <http://gnu.org/licenses/gpl.html>. diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst index e45f0786f3f9..67d9c38e95eb 100644 --- a/Documentation/trace/ftrace.rst +++ b/Documentation/trace/ftrace.rst @@ -461,9 +461,17 @@ of ftrace. Here is a list of some of the key files: and ticks at the same rate as the hardware clocksource. boot: - Same as mono. Used to be a separate clock which accounted - for the time spent in suspend while CLOCK_MONOTONIC did - not. + This is the boot clock (CLOCK_BOOTTIME) and is based on the + fast monotonic clock, but also accounts for time spent in + suspend. 
Since the clock access is designed for use in + tracing in the suspend path, some side effects are possible + if clock is accessed after the suspend time is accounted before + the fast mono clock is updated. In this case, the clock update + appears to happen slightly sooner than it normally would have. + Also on 32-bit systems, it's possible that the 64-bit boot offset + sees a partial update. These effects are rare and post + processing should be able to handle them. See comments in the + ktime_get_boot_fast_ns() function for more information. To set a clock, simply echo the clock name into this file:: diff --git a/Documentation/translations/zh_CN/video4linux/v4l2-framework.txt b/Documentation/translations/zh_CN/video4linux/v4l2-framework.txt index 698660b7f21f..c77c0f060864 100644 --- a/Documentation/translations/zh_CN/video4linux/v4l2-framework.txt +++ b/Documentation/translations/zh_CN/video4linux/v4l2-framework.txt @@ -6,7 +6,7 @@ communicating in English you can also ask the Chinese maintainer for help. Contact the Chinese maintainer if this translation is outdated or if there is a problem with the translation. -Maintainer: Mauro Carvalho Chehab <mchehab@infradead.org> +Maintainer: Mauro Carvalho Chehab <mchehab@kernel.org> Chinese maintainer: Fu Wei <tekkamanninja@gmail.com> --------------------------------------------------------------------- Documentation/video4linux/v4l2-framework.txt 的中文翻译 @@ -14,7 +14,7 @@ Documentation/video4linux/v4l2-framework.txt 的中文翻译 如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文 交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻 译存在问题,请联系中文版维护者。 -英文版维护者: Mauro Carvalho Chehab <mchehab@infradead.org> +英文版维护者: Mauro Carvalho Chehab <mchehab@kernel.org> 中文版维护者: 傅炜 Fu Wei <tekkamanninja@gmail.com> 中文版翻译者: 傅炜 Fu Wei <tekkamanninja@gmail.com> 中文版校译者: 傅炜 Fu Wei <tekkamanninja@gmail.com> diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 1c7958b57fe9..758bf403a169 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -1960,6 +1960,9 @@ ARM 32-bit VFP control registers have the following id bit patterns: ARM 64-bit FP registers have the following id bit patterns: 0x4030 0000 0012 0 <regno:12> +ARM firmware pseudo-registers have the following bit pattern: + 0x4030 0000 0014 <regno:16> + arm64 registers are mapped using the lower 32 bits. The upper 16 of that is the register group type, or coprocessor number: @@ -1976,6 +1979,9 @@ arm64 CCSIDR registers are demultiplexed by CSSELR value: arm64 system registers have the following id bit patterns: 0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3> +arm64 firmware pseudo-registers have the following bit pattern: + 0x6030 0000 0014 <regno:16> + MIPS registers are mapped using the lower 32 bits. The upper 16 of that is the register group type: @@ -2510,7 +2516,8 @@ Possible features: and execute guest code when KVM_RUN is called. - KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode. Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only). - - KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 for the CPU. + - KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 (or a future revision + backward compatible with v0.2) for the CPU. Depends on KVM_CAP_ARM_PSCI_0_2. - KVM_ARM_VCPU_PMU_V3: Emulate PMUv3 for the CPU. Depends on KVM_CAP_ARM_PMU_V3. 
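(Again an aside, not part of the diff.) Userspace reads and migrates the ARM firmware pseudo-registers described above through the ordinary KVM_GET_ONE_REG / KVM_SET_ONE_REG ioctls. A rough userspace sketch, assuming uapi headers new enough to define KVM_REG_ARM_PSCI_VERSION and the usual PSCI major.minor encoding of the returned value:

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Query the PSCI version exposed to an already-initialized vcpu. */
    static int read_psci_version(int vcpu_fd)
    {
            uint64_t version = 0;
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_ARM_PSCI_VERSION,  /* firmware pseudo-register */
                    .addr = (uint64_t)(uintptr_t)&version,
            };

            if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0) {
                    perror("KVM_GET_ONE_REG");
                    return -1;
            }

            /* PSCI versions encode major in bits [31:16], minor in [15:0]. */
            printf("PSCI v%u.%u\n",
                   (unsigned)(version >> 16), (unsigned)(version & 0xffff));
            return 0;
    }

Writing a lower compatible version back with KVM_SET_ONE_REG before migration is the use case the new psci.txt below is describing.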
diff --git a/Documentation/virtual/kvm/arm/psci.txt b/Documentation/virtual/kvm/arm/psci.txt new file mode 100644 index 000000000000..aafdab887b04 --- /dev/null +++ b/Documentation/virtual/kvm/arm/psci.txt @@ -0,0 +1,30 @@ +KVM implements the PSCI (Power State Coordination Interface) +specification in order to provide services such as CPU on/off, reset +and power-off to the guest. + +The PSCI specification is regularly updated to provide new features, +and KVM implements these updates if they make sense from a virtualization +point of view. + +This means that a guest booted on two different versions of KVM can +observe two different "firmware" revisions. This could cause issues if +a given guest is tied to a particular PSCI revision (unlikely), or if +a migration causes a different PSCI version to be exposed out of the +blue to an unsuspecting guest. + +In order to remedy this situation, KVM exposes a set of "firmware +pseudo-registers" that can be manipulated using the GET/SET_ONE_REG +interface. These registers can be saved/restored by userspace, and set +to a convenient value if required. + +The following register is defined: + +* KVM_REG_ARM_PSCI_VERSION: + + - Only valid if the vcpu has the KVM_ARM_VCPU_PSCI_0_2 feature set + (and thus has already been initialized) + - Returns the current PSCI version on GET_ONE_REG (defaulting to the + highest PSCI version implemented by KVM and compatible with v0.2) + - Allows any PSCI version implemented by KVM and compatible with + v0.2 to be set with SET_ONE_REG + - Affects the whole VM (even if the register view is per-vcpu) diff --git a/MAINTAINERS b/MAINTAINERS index 0a1410d5a621..58b9861ccf99 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -137,9 +137,9 @@ Maintainers List (try to look for most precise areas first) ----------------------------------- 3C59X NETWORK DRIVER -M: Steffen Klassert <klassert@mathematik.tu-chemnitz.de> +M: Steffen Klassert <klassert@kernel.org> L: netdev@vger.kernel.org -S: Maintained +S: Odd Fixes F: Documentation/networking/vortex.txt F: drivers/net/ethernet/3com/3c59x.c @@ -564,8 +564,9 @@ S: Maintained F: drivers/media/dvb-frontends/af9033* AFFS FILE SYSTEM +M: David Sterba <dsterba@suse.com> L: linux-fsdevel@vger.kernel.org -S: Orphan +S: Odd Fixes F: Documentation/filesystems/affs.txt F: fs/affs/ @@ -905,6 +906,8 @@ ANDROID ION DRIVER M: Laura Abbott <labbott@redhat.com> M: Sumit Semwal <sumit.semwal@linaro.org> L: devel@driverdev.osuosl.org +L: dri-devel@lists.freedesktop.org +L: linaro-mm-sig@lists.linaro.org (moderated for non-subscribers) S: Supported F: drivers/staging/android/ion F: drivers/staging/android/uapi/ion.h @@ -1208,7 +1211,6 @@ F: drivers/*/*alpine* ARM/ARTPEC MACHINE SUPPORT M: Jesper Nilsson <jesper.nilsson@axis.com> M: Lars Persson <lars.persson@axis.com> -M: Niklas Cassel <niklas.cassel@axis.com> S: Maintained L: linux-arm-kernel@axis.com F: arch/arm/mach-artpec @@ -1373,7 +1375,8 @@ F: arch/arm/mach-ebsa110/ F: drivers/net/ethernet/amd/am79c961a.* ARM/ENERGY MICRO (SILICON LABS) EFM32 SUPPORT -M: Uwe Kleine-König <kernel@pengutronix.de> +M: Uwe Kleine-König <u.kleine-koenig@pengutronix.de> +R: Pengutronix Kernel Team <kernel@pengutronix.de> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained N: efm32 @@ -1401,7 +1404,8 @@ F: arch/arm/mach-footbridge/ ARM/FREESCALE IMX / MXC ARM ARCHITECTURE M: Shawn Guo <shawnguo@kernel.org> -M: Sascha Hauer <kernel@pengutronix.de> +M: Sascha Hauer <s.hauer@pengutronix.de> +R: Pengutronix Kernel Team <kernel@pengutronix.de> 
R: Fabio Estevam <fabio.estevam@nxp.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@ -1416,7 +1420,8 @@ F: include/soc/imx/ ARM/FREESCALE VYBRID ARM ARCHITECTURE M: Shawn Guo <shawnguo@kernel.org> -M: Sascha Hauer <kernel@pengutronix.de> +M: Sascha Hauer <s.hauer@pengutronix.de> +R: Pengutronix Kernel Team <kernel@pengutronix.de> R: Stefan Agner <stefan@agner.ch> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@ -2549,7 +2554,6 @@ F: Documentation/devicetree/bindings/sound/axentia,* F: sound/soc/atmel/tse850-pcm5142.c AZ6007 DVB DRIVER -M: Mauro Carvalho Chehab <mchehab@s-opensource.com> M: Mauro Carvalho Chehab <mchehab@kernel.org> L: linux-media@vger.kernel.org W: https://linuxtv.org @@ -2614,7 +2618,7 @@ S: Maintained F: drivers/net/hamradio/baycom* BCACHE (BLOCK LAYER CACHE) -M: Michael Lyle <mlyle@lyle.org> +M: Coly Li <colyli@suse.de> M: Kent Overstreet <kent.overstreet@gmail.com> L: linux-bcache@vger.kernel.org W: http://bcache.evilpiepirate.org @@ -3078,7 +3082,6 @@ F: include/linux/btrfs* F: include/uapi/linux/btrfs* BTTV VIDEO4LINUX DRIVER -M: Mauro Carvalho Chehab <mchehab@s-opensource.com> M: Mauro Carvalho Chehab <mchehab@kernel.org> L: linux-media@vger.kernel.org W: https://linuxtv.org @@ -3688,7 +3691,6 @@ F: drivers/cpufreq/arm_big_little_dt.c CPU POWER MONITORING SUBSYSTEM M: Thomas Renninger <trenn@suse.com> -M: Shuah Khan <shuahkh@osg.samsung.com> M: Shuah Khan <shuah@kernel.org> L: linux-pm@vger.kernel.org S: Maintained @@ -3807,7 +3809,6 @@ S: Maintained F: drivers/media/dvb-frontends/cx24120* CX88 VIDEO4LINUX DRIVER -M: Mauro Carvalho Chehab <mchehab@s-opensource.com> M: Mauro Carvalho Chehab <mchehab@kernel.org> L: linux-media@vger.kernel.org W: https://linuxtv.org @@ -4245,6 +4246,9 @@ F: include/trace/events/fs_dax.h DEVICE DIRECT ACCESS (DAX) M: Dan Williams <dan.j.williams@intel.com> +M: Dave Jiang <dave.jiang@intel.com> +M: Ross Zwisler <ross.zwisler@linux.intel.com> +M: Vishal Verma <vishal.l.verma@intel.com> L: linux-nvdimm@lists.01.org S: Supported F: drivers/dax/ @@ -5045,7 +5049,6 @@ F: drivers/edac/thunderx_edac* EDAC-CORE M: Borislav Petkov <bp@alien8.de> -M: Mauro Carvalho Chehab <mchehab@s-opensource.com> M: Mauro Carvalho Chehab <mchehab@kernel.org> L: linux-edac@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp.git for-next @@ -5074,7 +5077,6 @@ S: Maintained F: drivers/edac/fsl_ddr_edac.* EDAC-GHES -M: Mauro Carvalho Chehab <mchehab@s-opensource.com> M: Mauro Carvalho Chehab <mchehab@kernel.org> L: linux-edac@vger.kernel.org S: Maintained @@ -5091,21 +5093,18 @@ S: Maintained F: drivers/edac/i5000_edac.c EDAC-I5400 -M: Mauro Carvalho Chehab <mchehab@s-opensource.com> M: Mauro Carvalho Chehab <mchehab@kernel.org> L: linux-edac@vger.kernel.org S: Maintained F: drivers/edac/i5400_edac.c EDAC-I7300 -M: Mauro Carvalho Chehab <mchehab@s-opensource.com> M: Mauro Carvalho Chehab <mchehab@kernel.org> L: linux-edac@vger.kernel.org S: Maintained F: drivers/edac/i7300_edac.c EDAC-I7CORE -M: Mauro Carvalho Chehab <mchehab@s-opensource.com> M: Mauro Carvalho Chehab <mchehab@kernel.org> L: linux-edac@vger.kernel.org S: Maintained @@ -5155,7 +5154,6 @@ S: Maintained F: drivers/edac/r82600_edac.c EDAC-SBRIDGE -M: Mauro Carvalho Chehab <mchehab@s-opensource.com> M: Mauro Carvalho Chehab <mchehab@kernel.org> L: linux-edac@vger.kernel.org S: Maintained @@ -5214,7 +5212,6 @@ S: Maintained F: drivers/net/ethernet/ibm/ehea/ EM28XX VIDEO4LINUX DRIVER -M: Mauro 
Carvalho Chehab <mchehab@s-opensource.com> M: Mauro Carvalho Chehab <mchehab@kernel.org> L: linux-media@vger.kernel.org W: https://linuxtv.org @@ -5652,7 +5649,8 @@ F: drivers/net/ethernet/freescale/fec.h F: Documentation/devicetree/bindings/net/fsl-fec.txt FREESCALE IMX / MXC FRAMEBUFFER DRIVER -M: Sascha Hauer <kernel@pengutronix.de> +M: Sascha Hauer <s.hauer@pengutronix.de> +R: Pengutronix Kernel Team <kernel@pengutronix.de> L: linux-fbdev@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@ -5784,6 +5782,14 @@ F: fs/crypto/ F: include/linux/fscrypt*.h F: Documentation/filesystems/fscrypt.rst +FSNOTIFY: FILESYSTEM NOTIFICATION INFRASTRUCTURE +M: Jan Kara <jack@suse.cz> +R: Amir Goldstein <amir73il@gmail.com> +L: linux-fsdevel@vger.kernel.org +S: Maintained +F: fs/notify/ +F: include/linux/fsnotify*.h + FUJITSU LAPTOP EXTRAS M: Jonathan Woithe <jwoithe@just42.net> L: platform-driver-x86@vger.kernel.org @@ -6256,7 +6262,7 @@ S: Odd Fixes F: drivers/media/usb/hdpvr/ HEWLETT PACKARD ENTERPRISE ILO NMI WATCHDOG DRIVER -M: Jimmy Vance <jimmy.vance@hpe.com> +M: Jerry Hoemann <jerry.hoemann@hpe.com> S: Supported F: Documentation/watchdog/hpwdt.txt F: drivers/watchdog/hpwdt.c @@ -7396,16 +7402,6 @@ S: Obsolete F: include/uapi/linux/ipx.h F: drivers/staging/ipx/ -IRDA SUBSYSTEM -M: Samuel Ortiz <samuel@sortiz.org> -L: irda-users@lists.sourceforge.net (subscribers-only) -L: netdev@vger.kernel.org -W: http://irda.sourceforge.net/ -S: Obsolete -T: git git://git.kernel.org/pub/scm/linux/kernel/git/sameo/irda-2.6.git -F: Documentation/networking/irda.txt -F: drivers/staging/irda/ - IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY) M: Marc Zyngier <marc.zyngier@arm.com> S: Maintained @@ -7670,9 +7666,11 @@ L: linux-kbuild@vger.kernel.org S: Maintained F: Documentation/kbuild/ F: Makefile -F: scripts/Makefile.* +F: scripts/Kbuild* +F: scripts/Makefile* F: scripts/basic/ F: scripts/mk* +F: scripts/mod/ F: scripts/package/ KERNEL JANITORS @@ -7697,7 +7695,6 @@ F: include/linux/sunrpc/ F: include/uapi/linux/sunrpc/ KERNEL SELFTEST FRAMEWORK -M: Shuah Khan <shuahkh@osg.samsung.com> M: Shuah Khan <shuah@kernel.org> L: linux-kselftest@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git @@ -7738,7 +7735,7 @@ F: arch/x86/include/asm/svm.h F: arch/x86/kvm/svm.c KERNEL VIRTUAL MACHINE FOR ARM (KVM/arm) -M: Christoffer Dall <christoffer.dall@linaro.org> +M: Christoffer Dall <christoffer.dall@arm.com> M: Marc Zyngier <marc.zyngier@arm.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: kvmarm@lists.cs.columbia.edu @@ -7752,7 +7749,7 @@ F: virt/kvm/arm/ F: include/kvm/arm_* KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64) -M: Christoffer Dall <christoffer.dall@linaro.org> +M: Christoffer Dall <christoffer.dall@arm.com> M: Marc Zyngier <marc.zyngier@arm.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: kvmarm@lists.cs.columbia.edu @@ -8048,6 +8045,9 @@ F: tools/lib/lockdep/ LIBNVDIMM BLK: MMIO-APERTURE DRIVER M: Ross Zwisler <ross.zwisler@linux.intel.com> +M: Dan Williams <dan.j.williams@intel.com> +M: Vishal Verma <vishal.l.verma@intel.com> +M: Dave Jiang <dave.jiang@intel.com> L: linux-nvdimm@lists.01.org Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ S: Supported @@ -8056,6 +8056,9 @@ F: drivers/nvdimm/region_devs.c LIBNVDIMM BTT: BLOCK TRANSLATION TABLE M: Vishal Verma <vishal.l.verma@intel.com> +M: Dan Williams <dan.j.williams@intel.com> +M: Ross Zwisler 
<ross.zwisler@linux.intel.com> +M: Dave Jiang <dave.jiang@intel.com> L: linux-nvdimm@lists.01.org Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ S: Supported @@ -8063,6 +8066,9 @@ F: drivers/nvdimm/btt* LIBNVDIMM PMEM: PERSISTENT MEMORY DRIVER M: Ross Zwisler <ross.zwisler@linux.intel.com> +M: Dan Williams <dan.j.williams@intel.com> +M: Vishal Verma <vishal.l.verma@intel.com> +M: Dave Jiang <dave.jiang@intel.com> L: linux-nvdimm@lists.01.org Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ S: Supported @@ -8078,6 +8084,9 @@ F: Documentation/devicetree/bindings/pmem/pmem-region.txt LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM M: Dan Williams <dan.j.williams@intel.com> +M: Ross Zwisler <ross.zwisler@linux.intel.com> +M: Vishal Verma <vishal.l.verma@intel.com> +M: Dave Jiang <dave.jiang@intel.com> L: linux-nvdimm@lists.01.org Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm.git @@ -8852,7 +8861,6 @@ F: Documentation/devicetree/bindings/media/nvidia,tegra-vde.txt F: drivers/staging/media/tegra-vde/ MEDIA INPUT INFRASTRUCTURE (V4L/DVB) -M: Mauro Carvalho Chehab <mchehab@s-opensource.com> M: Mauro Carvalho Chehab <mchehab@kernel.org> P: LinuxTV.org Project L: linux-media@vger.kernel.org @@ -9706,6 +9714,7 @@ W: https://fedorahosted.org/dropwatch/ F: net/core/drop_monitor.c NETWORKING DRIVERS +M: "David S. Miller" <davem@davemloft.net> L: netdev@vger.kernel.org W: http://www.linuxfoundation.org/en/Net Q: http://patchwork.ozlabs.org/project/netdev/list/ @@ -9765,6 +9774,7 @@ F: include/uapi/linux/net_namespace.h F: tools/testing/selftests/net/ F: lib/net_utils.c F: lib/random32.c +F: Documentation/networking/ NETWORKING [IPSEC] M: Steffen Klassert <steffen.klassert@secunet.com> @@ -9861,7 +9871,7 @@ F: include/linux/platform_data/nxp-nci.h F: Documentation/devicetree/bindings/net/nfc/ NFS, SUNRPC, AND LOCKD CLIENTS -M: Trond Myklebust <trond.myklebust@primarydata.com> +M: Trond Myklebust <trond.myklebust@hammerspace.com> M: Anna Schumaker <anna.schumaker@netapp.com> L: linux-nfs@vger.kernel.org W: http://client.linux-nfs.org @@ -10881,7 +10891,6 @@ F: drivers/pci/host/ F: drivers/pci/dwc/ PCIE DRIVER FOR AXIS ARTPEC -M: Niklas Cassel <niklas.cassel@axis.com> M: Jesper Nilsson <jesper.nilsson@axis.com> L: linux-arm-kernel@axis.com L: linux-pci@vger.kernel.org @@ -12240,7 +12249,6 @@ S: Odd Fixes F: drivers/media/i2c/saa6588* SAA7134 VIDEO4LINUX DRIVER -M: Mauro Carvalho Chehab <mchehab@s-opensource.com> M: Mauro Carvalho Chehab <mchehab@kernel.org> L: linux-media@vger.kernel.org W: https://linuxtv.org @@ -12479,6 +12487,7 @@ F: drivers/scsi/st_*.h SCTP PROTOCOL M: Vlad Yasevich <vyasevich@gmail.com> M: Neil Horman <nhorman@tuxdriver.com> +M: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com> L: linux-sctp@vger.kernel.org W: http://lksctp.sourceforge.net S: Maintained @@ -12744,7 +12753,6 @@ S: Maintained F: drivers/media/radio/si4713/radio-usb-si4713.c SIANO DVB DRIVER -M: Mauro Carvalho Chehab <mchehab@s-opensource.com> M: Mauro Carvalho Chehab <mchehab@kernel.org> L: linux-media@vger.kernel.org W: https://linuxtv.org @@ -12816,7 +12824,8 @@ F: include/linux/siphash.h SIOX M: Gavin Schenk <g.schenk@eckelmann.de> -M: Uwe Kleine-König <kernel@pengutronix.de> +M: Uwe Kleine-König <u.kleine-koenig@pengutronix.de> +R: Pengutronix Kernel Team <kernel@pengutronix.de> S: Supported F: drivers/siox/* F: include/trace/events/siox.h @@ -13734,7 +13743,6 @@ S: Maintained F: drivers/media/i2c/tda9840* TEA5761 
TUNER DRIVER -M: Mauro Carvalho Chehab <mchehab@s-opensource.com> M: Mauro Carvalho Chehab <mchehab@kernel.org> L: linux-media@vger.kernel.org W: https://linuxtv.org @@ -13743,7 +13751,6 @@ S: Odd fixes F: drivers/media/tuners/tea5761.* TEA5767 TUNER DRIVER -M: Mauro Carvalho Chehab <mchehab@s-opensource.com> M: Mauro Carvalho Chehab <mchehab@kernel.org> L: linux-media@vger.kernel.org W: https://linuxtv.org @@ -13833,7 +13840,6 @@ S: Supported F: drivers/iommu/tegra* TEGRA KBC DRIVER -M: Rakesh Iyer <riyer@nvidia.com> M: Laxman Dewangan <ldewangan@nvidia.com> S: Supported F: drivers/input/keyboard/tegra-kbc.c @@ -13936,7 +13942,7 @@ THUNDERBOLT DRIVER M: Andreas Noever <andreas.noever@gmail.com> M: Michael Jamet <michael.jamet@intel.com> M: Mika Westerberg <mika.westerberg@linux.intel.com> -M: Yehezkel Bernat <yehezkel.bernat@intel.com> +M: Yehezkel Bernat <YehezkelShB@gmail.com> T: git git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt.git S: Maintained F: Documentation/admin-guide/thunderbolt.rst @@ -13946,7 +13952,7 @@ F: include/linux/thunderbolt.h THUNDERBOLT NETWORK DRIVER M: Michael Jamet <michael.jamet@intel.com> M: Mika Westerberg <mika.westerberg@linux.intel.com> -M: Yehezkel Bernat <yehezkel.bernat@intel.com> +M: Yehezkel Bernat <YehezkelShB@gmail.com> L: netdev@vger.kernel.org S: Maintained F: drivers/net/thunderbolt.c @@ -14160,7 +14166,6 @@ F: Documentation/networking/tlan.txt F: drivers/net/ethernet/ti/tlan.* TM6000 VIDEO4LINUX DRIVER -M: Mauro Carvalho Chehab <mchehab@s-opensource.com> M: Mauro Carvalho Chehab <mchehab@kernel.org> L: linux-media@vger.kernel.org W: https://linuxtv.org @@ -14643,7 +14648,6 @@ F: drivers/usb/common/usb-otg-fsm.c USB OVER IP DRIVER M: Valentina Manea <valentina.manea.m@gmail.com> -M: Shuah Khan <shuahkh@osg.samsung.com> M: Shuah Khan <shuah@kernel.org> L: linux-usb@vger.kernel.org S: Maintained @@ -15387,7 +15391,6 @@ S: Maintained F: arch/x86/entry/vdso/ XC2028/3028 TUNER DRIVER -M: Mauro Carvalho Chehab <mchehab@s-opensource.com> M: Mauro Carvalho Chehab <mchehab@kernel.org> L: linux-media@vger.kernel.org W: https://linuxtv.org @@ -2,8 +2,8 @@ VERSION = 4 PATCHLEVEL = 17 SUBLEVEL = 0 -EXTRAVERSION = -rc1 -NAME = Fearless Coyote +EXTRAVERSION = -rc5 +NAME = Merciless Moray # *DOCUMENTATION* # To see a list of typical targets execute "make help" diff --git a/arch/Kconfig b/arch/Kconfig index 8e0d665c8d53..75dd23acf133 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -464,6 +464,10 @@ config GCC_PLUGIN_LATENT_ENTROPY config GCC_PLUGIN_STRUCTLEAK bool "Force initialization of variables containing userspace addresses" depends on GCC_PLUGINS + # Currently STRUCTLEAK inserts initialization out of live scope of + # variables from KASAN point of view. This leads to KASAN false + # positive reports. Prohibit this combination for now. + depends on !KASAN_EXTRA help This plugin zero-initializes any structures containing a __user attribute. 
This can prevent some classes of information diff --git a/arch/arm/boot/dts/gemini-nas4220b.dts b/arch/arm/boot/dts/gemini-nas4220b.dts index 8bbb6f85d161..4785fbcc41ed 100644 --- a/arch/arm/boot/dts/gemini-nas4220b.dts +++ b/arch/arm/boot/dts/gemini-nas4220b.dts @@ -134,37 +134,37 @@ function = "gmii"; groups = "gmii_gmac0_grp"; }; - /* Settings come from OpenWRT */ + /* Settings come from OpenWRT, pins on SL3516 */ conf0 { - pins = "R8 GMAC0 RXDV", "U11 GMAC1 RXDV"; + pins = "V8 GMAC0 RXDV", "T10 GMAC1 RXDV"; skew-delay = <0>; }; conf1 { - pins = "T8 GMAC0 RXC", "T11 GMAC1 RXC"; + pins = "Y7 GMAC0 RXC", "Y11 GMAC1 RXC"; skew-delay = <15>; }; conf2 { - pins = "P8 GMAC0 TXEN", "V11 GMAC1 TXEN"; + pins = "T8 GMAC0 TXEN", "W11 GMAC1 TXEN"; skew-delay = <7>; }; conf3 { - pins = "V7 GMAC0 TXC"; + pins = "U8 GMAC0 TXC"; skew-delay = <11>; }; conf4 { - pins = "P10 GMAC1 TXC"; + pins = "V11 GMAC1 TXC"; skew-delay = <10>; }; conf5 { /* The data lines all have default skew */ - pins = "U8 GMAC0 RXD0", "V8 GMAC0 RXD1", - "P9 GMAC0 RXD2", "R9 GMAC0 RXD3", - "U7 GMAC0 TXD0", "T7 GMAC0 TXD1", - "R7 GMAC0 TXD2", "P7 GMAC0 TXD3", - "R11 GMAC1 RXD0", "P11 GMAC1 RXD1", - "V12 GMAC1 RXD2", "U12 GMAC1 RXD3", - "R10 GMAC1 TXD0", "T10 GMAC1 TXD1", - "U10 GMAC1 TXD2", "V10 GMAC1 TXD3"; + pins = "W8 GMAC0 RXD0", "V9 GMAC0 RXD1", + "Y8 GMAC0 RXD2", "U9 GMAC0 RXD3", + "T7 GMAC0 TXD0", "U6 GMAC0 TXD1", + "V7 GMAC0 TXD2", "U7 GMAC0 TXD3", + "Y12 GMAC1 RXD0", "V12 GMAC1 RXD1", + "T11 GMAC1 RXD2", "W12 GMAC1 RXD3", + "U10 GMAC1 TXD0", "Y10 GMAC1 TXD1", + "W10 GMAC1 TXD2", "T9 GMAC1 TXD3"; skew-delay = <7>; }; /* Set up drive strength on GMAC0 to 16 mA */ diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi index bf343195697e..54111ed218b1 100644 --- a/arch/arm/boot/dts/imx35.dtsi +++ b/arch/arm/boot/dts/imx35.dtsi @@ -303,7 +303,7 @@ }; can1: can@53fe4000 { - compatible = "fsl,imx35-flexcan"; + compatible = "fsl,imx35-flexcan", "fsl,imx25-flexcan"; reg = <0x53fe4000 0x1000>; clocks = <&clks 33>, <&clks 33>; clock-names = "ipg", "per"; @@ -312,7 +312,7 @@ }; can2: can@53fe8000 { - compatible = "fsl,imx35-flexcan"; + compatible = "fsl,imx35-flexcan", "fsl,imx25-flexcan"; reg = <0x53fe8000 0x1000>; clocks = <&clks 34>, <&clks 34>; clock-names = "ipg", "per"; diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi index 7d647d043f52..3d65c0192f69 100644 --- a/arch/arm/boot/dts/imx53.dtsi +++ b/arch/arm/boot/dts/imx53.dtsi @@ -551,7 +551,7 @@ }; can1: can@53fc8000 { - compatible = "fsl,imx53-flexcan"; + compatible = "fsl,imx53-flexcan", "fsl,imx25-flexcan"; reg = <0x53fc8000 0x4000>; interrupts = <82>; clocks = <&clks IMX5_CLK_CAN1_IPG_GATE>, @@ -561,7 +561,7 @@ }; can2: can@53fcc000 { - compatible = "fsl,imx53-flexcan"; + compatible = "fsl,imx53-flexcan", "fsl,imx25-flexcan"; reg = <0x53fcc000 0x4000>; interrupts = <83>; clocks = <&clks IMX5_CLK_CAN2_IPG_GATE>, diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi index 475904894b86..e554b6e039f3 100644 --- a/arch/arm/boot/dts/omap4.dtsi +++ b/arch/arm/boot/dts/omap4.dtsi @@ -163,10 +163,10 @@ cm2: cm2@8000 { compatible = "ti,omap4-cm2", "simple-bus"; - reg = <0x8000 0x3000>; + reg = <0x8000 0x2000>; #address-cells = <1>; #size-cells = <1>; - ranges = <0 0x8000 0x3000>; + ranges = <0 0x8000 0x2000>; cm2_clocks: clocks { #address-cells = <1>; @@ -250,11 +250,11 @@ prm: prm@6000 { compatible = "ti,omap4-prm"; - reg = <0x6000 0x3000>; + reg = <0x6000 0x2000>; interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>; #address-cells 
= <1>; #size-cells = <1>; - ranges = <0 0x6000 0x3000>; + ranges = <0 0x6000 0x2000>; prm_clocks: clocks { #address-cells = <1>; diff --git a/arch/arm/configs/gemini_defconfig b/arch/arm/configs/gemini_defconfig index 2a63fa10c813..553777ac2814 100644 --- a/arch/arm/configs/gemini_defconfig +++ b/arch/arm/configs/gemini_defconfig @@ -1,6 +1,7 @@ # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_NO_HZ_IDLE=y +CONFIG_HIGH_RES_TIMERS=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_USER_NS=y CONFIG_RELAY=y @@ -12,15 +13,21 @@ CONFIG_ARCH_GEMINI=y CONFIG_PCI=y CONFIG_PREEMPT=y CONFIG_AEABI=y +CONFIG_HIGHMEM=y +CONFIG_CMA=y CONFIG_CMDLINE="console=ttyS0,115200n8" CONFIG_KEXEC=y CONFIG_BINFMT_MISC=y CONFIG_PM=y +CONFIG_NET=y +CONFIG_UNIX=y +CONFIG_INET=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y CONFIG_MTD=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y CONFIG_MTD_CFI_INTELEXT=y CONFIG_MTD_CFI_AMDSTD=y CONFIG_MTD_CFI_STAA=y @@ -33,6 +40,11 @@ CONFIG_BLK_DEV_SD=y # CONFIG_SCSI_LOWLEVEL is not set CONFIG_ATA=y CONFIG_PATA_FTIDE010=y +CONFIG_NETDEVICES=y +CONFIG_GEMINI_ETHERNET=y +CONFIG_MDIO_BITBANG=y +CONFIG_MDIO_GPIO=y +CONFIG_REALTEK_PHY=y CONFIG_INPUT_EVDEV=y CONFIG_KEYBOARD_GPIO=y # CONFIG_INPUT_MOUSE is not set @@ -43,9 +55,19 @@ CONFIG_SERIAL_8250_NR_UARTS=1 CONFIG_SERIAL_8250_RUNTIME_UARTS=1 CONFIG_SERIAL_OF_PLATFORM=y # CONFIG_HW_RANDOM is not set -# CONFIG_HWMON is not set +CONFIG_I2C_GPIO=y +CONFIG_SPI=y +CONFIG_SPI_GPIO=y +CONFIG_SENSORS_GPIO_FAN=y +CONFIG_SENSORS_LM75=y +CONFIG_THERMAL=y CONFIG_WATCHDOG=y -CONFIG_GEMINI_WATCHDOG=y +CONFIG_REGULATOR=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y +CONFIG_DRM=y +CONFIG_DRM_PANEL_ILITEK_IL9322=y +CONFIG_DRM_TVE200=y +CONFIG_LOGO=y CONFIG_USB=y CONFIG_USB_MON=y CONFIG_USB_FOTG210_HCD=y @@ -54,6 +76,7 @@ CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_GPIO=y CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_DISK=y CONFIG_LEDS_TRIGGER_HEARTBEAT=y CONFIG_RTC_CLASS=y CONFIG_DMADEVICES=y diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig index 2620ce790db0..371fca4e1ab7 100644 --- a/arch/arm/configs/socfpga_defconfig +++ b/arch/arm/configs/socfpga_defconfig @@ -57,6 +57,7 @@ CONFIG_MTD_M25P80=y CONFIG_MTD_NAND=y CONFIG_MTD_NAND_DENALI_DT=y CONFIG_MTD_SPI_NOR=y +# CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is not set CONFIG_SPI_CADENCE_QUADSPI=y CONFIG_OF_OVERLAY=y CONFIG_OF_CONFIGFS=y diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index c6a749568dd6..c7c28c885a19 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -77,6 +77,9 @@ struct kvm_arch { /* Interrupt controller */ struct vgic_dist vgic; int max_vcpus; + + /* Mandated version of PSCI */ + u32 psci_version; }; #define KVM_NR_MEM_OBJS 40 diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h index 2ba95d6fe852..caae4843cb70 100644 --- a/arch/arm/include/uapi/asm/kvm.h +++ b/arch/arm/include/uapi/asm/kvm.h @@ -195,6 +195,12 @@ struct kvm_arch_memory_slot { #define KVM_REG_ARM_VFP_FPINST 0x1009 #define KVM_REG_ARM_VFP_FPINST2 0x100A +/* KVM-as-firmware specific pseudo-registers */ +#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT) +#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \ + KVM_REG_ARM_FW | ((r) & 0xffff)) +#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0) + /* Device Control API: ARM VGIC */ #define KVM_DEV_ARM_VGIC_GRP_ADDR 0 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 diff --git a/arch/arm/kvm/guest.c 
b/arch/arm/kvm/guest.c index 1e0784ebbfd6..a18f33edc471 100644 --- a/arch/arm/kvm/guest.c +++ b/arch/arm/kvm/guest.c @@ -22,6 +22,7 @@ #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/fs.h> +#include <kvm/arm_psci.h> #include <asm/cputype.h> #include <linux/uaccess.h> #include <asm/kvm.h> @@ -176,6 +177,7 @@ static unsigned long num_core_regs(void) unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) { return num_core_regs() + kvm_arm_num_coproc_regs(vcpu) + + kvm_arm_get_fw_num_regs(vcpu) + NUM_TIMER_REGS; } @@ -196,6 +198,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) uindices++; } + ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices); + if (ret) + return ret; + uindices += kvm_arm_get_fw_num_regs(vcpu); + ret = copy_timer_indices(vcpu, uindices); if (ret) return ret; @@ -214,6 +221,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) return get_core_reg(vcpu, reg); + if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW) + return kvm_arm_get_fw_reg(vcpu, reg); + if (is_timer_reg(reg->id)) return get_timer_reg(vcpu, reg); @@ -230,6 +240,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) return set_core_reg(vcpu, reg); + if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW) + return kvm_arm_set_fw_reg(vcpu, reg); + if (is_timer_reg(reg->id)) return set_timer_reg(vcpu, reg); diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index 4603c30fef73..0d9ce58bc464 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -243,8 +243,4 @@ arch/arm/mach-omap2/pm-asm-offsets.s: arch/arm/mach-omap2/pm-asm-offsets.c include/generated/ti-pm-asm-offsets.h: arch/arm/mach-omap2/pm-asm-offsets.s FORCE $(call filechk,offsets,__TI_PM_ASM_OFFSETS_H__) -# For rule to generate ti-emif-asm-offsets.h dependency -include drivers/memory/Makefile.asm-offsets - -arch/arm/mach-omap2/sleep33xx.o: include/generated/ti-pm-asm-offsets.h include/generated/ti-emif-asm-offsets.h -arch/arm/mach-omap2/sleep43xx.o: include/generated/ti-pm-asm-offsets.h include/generated/ti-emif-asm-offsets.h +$(obj)/sleep33xx.o $(obj)/sleep43xx.o: include/generated/ti-pm-asm-offsets.h diff --git a/arch/arm/mach-omap2/pm-asm-offsets.c b/arch/arm/mach-omap2/pm-asm-offsets.c index 6d4392da7c11..b9846b19e5e2 100644 --- a/arch/arm/mach-omap2/pm-asm-offsets.c +++ b/arch/arm/mach-omap2/pm-asm-offsets.c @@ -7,9 +7,12 @@ #include <linux/kbuild.h> #include <linux/platform_data/pm33xx.h> +#include <linux/ti-emif-sram.h> int main(void) { + ti_emif_asm_offsets(); + DEFINE(AMX3_PM_WFI_FLAGS_OFFSET, offsetof(struct am33xx_pm_sram_data, wfi_flags)); DEFINE(AMX3_PM_L2_AUX_CTRL_VAL_OFFSET, diff --git a/arch/arm/mach-omap2/sleep33xx.S b/arch/arm/mach-omap2/sleep33xx.S index 218d79930b04..322b3bb868b4 100644 --- a/arch/arm/mach-omap2/sleep33xx.S +++ b/arch/arm/mach-omap2/sleep33xx.S @@ -6,7 +6,6 @@ * Dave Gerlach, Vaibhav Bedia */ -#include <generated/ti-emif-asm-offsets.h> #include <generated/ti-pm-asm-offsets.h> #include <linux/linkage.h> #include <linux/ti-emif-sram.h> diff --git a/arch/arm/mach-omap2/sleep43xx.S b/arch/arm/mach-omap2/sleep43xx.S index b24be624e8b9..8903814a6677 100644 --- a/arch/arm/mach-omap2/sleep43xx.S +++ b/arch/arm/mach-omap2/sleep43xx.S @@ -6,7 +6,6 @@ * Dave Gerlach, Vaibhav Bedia */ -#include <generated/ti-emif-asm-offsets.h> #include 
<generated/ti-pm-asm-offsets.h> #include <linux/linkage.h> #include <linux/ti-emif-sram.h> diff --git a/arch/arm/mach-s3c24xx/mach-jive.c b/arch/arm/mach-s3c24xx/mach-jive.c index 59589a4a0d4b..885e8f12e4b9 100644 --- a/arch/arm/mach-s3c24xx/mach-jive.c +++ b/arch/arm/mach-s3c24xx/mach-jive.c @@ -427,9 +427,9 @@ static struct gpiod_lookup_table jive_wm8750_gpiod_table = { .dev_id = "spi_gpio", .table = { GPIO_LOOKUP("GPIOB", 4, - "gpio-sck", GPIO_ACTIVE_HIGH), + "sck", GPIO_ACTIVE_HIGH), GPIO_LOOKUP("GPIOB", 9, - "gpio-mosi", GPIO_ACTIVE_HIGH), + "mosi", GPIO_ACTIVE_HIGH), GPIO_LOOKUP("GPIOH", 10, "cs", GPIO_ACTIVE_HIGH), { }, diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 15402861bb59..87f7d2f9f17c 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -56,7 +56,11 @@ KBUILD_AFLAGS += $(lseinstr) $(brokengasinst) KBUILD_CFLAGS += $(call cc-option,-mabi=lp64) KBUILD_AFLAGS += $(call cc-option,-mabi=lp64) +ifeq ($(cc-name),clang) +KBUILD_CFLAGS += -DCONFIG_ARCH_SUPPORTS_INT128 +else KBUILD_CFLAGS += $(call cc-ifversion, -ge, 0500, -DCONFIG_ARCH_SUPPORTS_INT128) +endif ifeq ($(CONFIG_CPU_BIG_ENDIAN), y) KBUILD_CPPFLAGS += -mbig-endian diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi index 4eef36b22538..88e712ea757a 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi @@ -212,3 +212,7 @@ pinctrl-0 = <&uart_ao_a_pins>; pinctrl-names = "default"; }; + +&usb0 { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts index 22bf37404ff1..3e3eb31748a3 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts @@ -271,3 +271,15 @@ pinctrl-0 = <&uart_ao_a_pins>; pinctrl-names = "default"; }; + +&usb0 { + status = "okay"; +}; + +&usb2_phy0 { + /* + * even though the schematics don't show it: + * HDMI_5V is also used as supply for the USB VBUS. 
+ */ + phy-supply = <&hdmi_5v>; +}; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts index 69c721a70e44..6739697be1de 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts @@ -215,3 +215,7 @@ pinctrl-0 = <&uart_ao_a_pins>; pinctrl-names = "default"; }; + +&usb0 { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi index 0a0953fbc7d4..0cfd701809de 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi @@ -185,3 +185,7 @@ pinctrl-0 = <&uart_ao_a_pins>; pinctrl-names = "default"; }; + +&usb0 { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi index e1a39cbed8c9..dba365ed4bd5 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi @@ -20,6 +20,67 @@ no-map; }; }; + + soc { + usb0: usb@c9000000 { + status = "disabled"; + compatible = "amlogic,meson-gxl-dwc3"; + #address-cells = <2>; + #size-cells = <2>; + ranges; + + clocks = <&clkc CLKID_USB>; + clock-names = "usb_general"; + resets = <&reset RESET_USB_OTG>; + reset-names = "usb_otg"; + + dwc3: dwc3@c9000000 { + compatible = "snps,dwc3"; + reg = <0x0 0xc9000000 0x0 0x100000>; + interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>; + dr_mode = "host"; + maximum-speed = "high-speed"; + snps,dis_u2_susphy_quirk; + phys = <&usb3_phy>, <&usb2_phy0>, <&usb2_phy1>; + }; + }; + }; +}; + +&apb { + usb2_phy0: phy@78000 { + compatible = "amlogic,meson-gxl-usb2-phy"; + #phy-cells = <0>; + reg = <0x0 0x78000 0x0 0x20>; + clocks = <&clkc CLKID_USB>; + clock-names = "phy"; + resets = <&reset RESET_USB_OTG>; + reset-names = "phy"; + status = "okay"; + }; + + usb2_phy1: phy@78020 { + compatible = "amlogic,meson-gxl-usb2-phy"; + #phy-cells = <0>; + reg = <0x0 0x78020 0x0 0x20>; + clocks = <&clkc CLKID_USB>; + clock-names = "phy"; + resets = <&reset RESET_USB_OTG>; + reset-names = "phy"; + status = "okay"; + }; + + usb3_phy: phy@78080 { + compatible = "amlogic,meson-gxl-usb3-phy"; + #phy-cells = <0>; + reg = <0x0 0x78080 0x0 0x20>; + interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clkc CLKID_USB>, <&clkc_AO CLKID_AO_CEC_32K>; + clock-names = "phy", "peripheral"; + resets = <&reset RESET_USB_OTG>, <&reset RESET_USB_OTG>; + reset-names = "phy", "peripheral"; + status = "okay"; + }; }; ðmac { diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts index 4fd46c1546a7..0868da476e41 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts @@ -406,3 +406,7 @@ status = "okay"; vref-supply = <&vddio_ao18>; }; + +&usb0 { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi index d076a7c425dd..247888d68a3a 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi @@ -80,6 +80,19 @@ }; }; +&apb { + usb2_phy2: phy@78040 { + compatible = "amlogic,meson-gxl-usb2-phy"; + #phy-cells = <0>; + reg = <0x0 0x78040 0x0 0x20>; + clocks = <&clkc CLKID_USB>; + clock-names = "phy"; + resets = <&reset RESET_USB_OTG>; + reset-names = "phy"; + status = "okay"; + }; +}; + &clkc_AO { 
compatible = "amlogic,meson-gxm-aoclkc", "amlogic,meson-gx-aoclkc"; }; @@ -100,3 +113,7 @@ &hdmi_tx { compatible = "amlogic,meson-gxm-dw-hdmi", "amlogic,meson-gx-dw-hdmi"; }; + +&dwc3 { + phys = <&usb3_phy>, <&usb2_phy0>, <&usb2_phy1>, <&usb2_phy2>; +}; diff --git a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi index 2ac43221ddb6..69804c5f1197 100644 --- a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi +++ b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi @@ -56,8 +56,6 @@ gpio_keys { compatible = "gpio-keys"; - #address-cells = <1>; - #size-cells = <0>; power-button { debounce_interval = <50>; diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi index 4b5465da81d8..8c68e0c26f1b 100644 --- a/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi +++ b/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi @@ -36,11 +36,11 @@ #size-cells = <1>; ranges = <0x0 0x0 0x67d00000 0x00800000>; - sata0: ahci@210000 { + sata0: ahci@0 { compatible = "brcm,iproc-ahci", "generic-ahci"; - reg = <0x00210000 0x1000>; + reg = <0x00000000 0x1000>; reg-names = "ahci"; - interrupts = <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; @@ -52,9 +52,9 @@ }; }; - sata_phy0: sata_phy@212100 { + sata_phy0: sata_phy@2100 { compatible = "brcm,iproc-sr-sata-phy"; - reg = <0x00212100 0x1000>; + reg = <0x00002100 0x1000>; reg-names = "phy"; #address-cells = <1>; #size-cells = <0>; @@ -66,11 +66,11 @@ }; }; - sata1: ahci@310000 { + sata1: ahci@10000 { compatible = "brcm,iproc-ahci", "generic-ahci"; - reg = <0x00310000 0x1000>; + reg = <0x00010000 0x1000>; reg-names = "ahci"; - interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; @@ -82,9 +82,9 @@ }; }; - sata_phy1: sata_phy@312100 { + sata_phy1: sata_phy@12100 { compatible = "brcm,iproc-sr-sata-phy"; - reg = <0x00312100 0x1000>; + reg = <0x00012100 0x1000>; reg-names = "phy"; #address-cells = <1>; #size-cells = <0>; @@ -96,11 +96,11 @@ }; }; - sata2: ahci@120000 { + sata2: ahci@20000 { compatible = "brcm,iproc-ahci", "generic-ahci"; - reg = <0x00120000 0x1000>; + reg = <0x00020000 0x1000>; reg-names = "ahci"; - interrupts = <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; @@ -112,9 +112,9 @@ }; }; - sata_phy2: sata_phy@122100 { + sata_phy2: sata_phy@22100 { compatible = "brcm,iproc-sr-sata-phy"; - reg = <0x00122100 0x1000>; + reg = <0x00022100 0x1000>; reg-names = "phy"; #address-cells = <1>; #size-cells = <0>; @@ -126,11 +126,11 @@ }; }; - sata3: ahci@130000 { + sata3: ahci@30000 { compatible = "brcm,iproc-ahci", "generic-ahci"; - reg = <0x00130000 0x1000>; + reg = <0x00030000 0x1000>; reg-names = "ahci"; - interrupts = <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; @@ -142,9 +142,9 @@ }; }; - sata_phy3: sata_phy@132100 { + sata_phy3: sata_phy@32100 { compatible = "brcm,iproc-sr-sata-phy"; - reg = <0x00132100 0x1000>; + reg = <0x00032100 0x1000>; reg-names = "phy"; #address-cells = <1>; #size-cells = <0>; @@ -156,11 +156,11 @@ }; }; - sata4: ahci@330000 { + sata4: ahci@100000 { compatible = "brcm,iproc-ahci", "generic-ahci"; - reg = <0x00330000 0x1000>; + 
reg = <0x00100000 0x1000>; reg-names = "ahci"; - interrupts = <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; @@ -172,9 +172,9 @@ }; }; - sata_phy4: sata_phy@332100 { + sata_phy4: sata_phy@102100 { compatible = "brcm,iproc-sr-sata-phy"; - reg = <0x00332100 0x1000>; + reg = <0x00102100 0x1000>; reg-names = "phy"; #address-cells = <1>; #size-cells = <0>; @@ -186,11 +186,11 @@ }; }; - sata5: ahci@400000 { + sata5: ahci@110000 { compatible = "brcm,iproc-ahci", "generic-ahci"; - reg = <0x00400000 0x1000>; + reg = <0x00110000 0x1000>; reg-names = "ahci"; - interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; @@ -202,9 +202,9 @@ }; }; - sata_phy5: sata_phy@402100 { + sata_phy5: sata_phy@112100 { compatible = "brcm,iproc-sr-sata-phy"; - reg = <0x00402100 0x1000>; + reg = <0x00112100 0x1000>; reg-names = "phy"; #address-cells = <1>; #size-cells = <0>; @@ -216,11 +216,11 @@ }; }; - sata6: ahci@410000 { + sata6: ahci@120000 { compatible = "brcm,iproc-ahci", "generic-ahci"; - reg = <0x00410000 0x1000>; + reg = <0x00120000 0x1000>; reg-names = "ahci"; - interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; @@ -232,9 +232,9 @@ }; }; - sata_phy6: sata_phy@412100 { + sata_phy6: sata_phy@122100 { compatible = "brcm,iproc-sr-sata-phy"; - reg = <0x00412100 0x1000>; + reg = <0x00122100 0x1000>; reg-names = "phy"; #address-cells = <1>; #size-cells = <0>; @@ -246,11 +246,11 @@ }; }; - sata7: ahci@420000 { + sata7: ahci@130000 { compatible = "brcm,iproc-ahci", "generic-ahci"; - reg = <0x00420000 0x1000>; + reg = <0x00130000 0x1000>; reg-names = "ahci"; - interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; @@ -262,9 +262,9 @@ }; }; - sata_phy7: sata_phy@422100 { + sata_phy7: sata_phy@132100 { compatible = "brcm,iproc-sr-sata-phy"; - reg = <0x00422100 0x1000>; + reg = <0x00132100 0x1000>; reg-names = "phy"; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 30014a9f8f2b..ea690b3562af 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -75,6 +75,7 @@ #define ARM_CPU_IMP_CAVIUM 0x43 #define ARM_CPU_IMP_BRCM 0x42 #define ARM_CPU_IMP_QCOM 0x51 +#define ARM_CPU_IMP_NVIDIA 0x4E #define ARM_CPU_PART_AEM_V8 0xD0F #define ARM_CPU_PART_FOUNDATION 0xD00 @@ -99,6 +100,9 @@ #define QCOM_CPU_PART_FALKOR 0xC00 #define QCOM_CPU_PART_KRYO 0x200 +#define NVIDIA_CPU_PART_DENVER 0x003 +#define NVIDIA_CPU_PART_CARMEL 0x004 + #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) @@ -114,6 +118,8 @@ #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1) #define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR) #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO) +#define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER) +#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL) #ifndef __ASSEMBLY__ 
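[Editorial aside, not part of the patch] The cputype.h hunk above adds an NVIDIA implementer code (0x4E) and Denver/Carmel part numbers, which the cpu_errata.c table below then matches via MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER). A minimal, self-contained sketch of how such a MIDR value splits into implementer and part-number fields, following the ARMv8 MIDR_EL1 bit layout; the macro and variable names here are illustrative, not the kernel's own:

/* Illustration only: decode implementer and part number from a MIDR value. */
#include <stdint.h>
#include <stdio.h>

#define MIDR_IMPLEMENTER(midr)  (((midr) >> 24) & 0xffu)   /* bits [31:24], 0x4e = NVIDIA  */
#define MIDR_PART_NUMBER(midr)  (((midr) >> 4)  & 0xfffu)  /* bits [15:4],  0x003 = Denver */

int main(void)
{
        uint32_t midr = (0x4eu << 24) | (0x003u << 4);  /* variant/arch/revision left at 0 */

        printf("implementer 0x%02x, part 0x%03x\n",
               (unsigned int)MIDR_IMPLEMENTER(midr),
               (unsigned int)MIDR_PART_NUMBER(midr));
        return 0;
}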
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 23b33e8ea03a..1dab3a984608 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -333,7 +333,7 @@ static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu) } else { u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1); sctlr |= (1 << 25); - vcpu_write_sys_reg(vcpu, SCTLR_EL1, sctlr); + vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1); } } diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index ab46bc70add6..469de8acd06f 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -75,6 +75,9 @@ struct kvm_arch { /* Interrupt controller */ struct vgic_dist vgic; + + /* Mandated version of PSCI */ + u32 psci_version; }; #define KVM_NR_MEM_OBJS 40 diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h index b6dbbe3123a9..97d0ef12e2ff 100644 --- a/arch/arm64/include/asm/module.h +++ b/arch/arm64/include/asm/module.h @@ -39,7 +39,7 @@ struct mod_arch_specific { u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela, Elf64_Sym *sym); -u64 module_emit_adrp_veneer(struct module *mod, void *loc, u64 val); +u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val); #ifdef CONFIG_RANDOMIZE_BASE extern u64 module_alloc_base; diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 7e2c27e63cd8..7c4c8f318ba9 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -230,7 +230,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte) } } -extern void __sync_icache_dcache(pte_t pteval, unsigned long addr); +extern void __sync_icache_dcache(pte_t pteval); /* * PTE bits configuration in the presence of hardware Dirty Bit Management @@ -253,7 +253,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t old_pte; if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte)) - __sync_icache_dcache(pte, addr); + __sync_icache_dcache(pte); /* * If the existing pte is valid, check for potential race with diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 9abbf3044654..04b3256f8e6d 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -206,6 +206,12 @@ struct kvm_arch_memory_slot { #define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2) #define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2) +/* KVM-as-firmware specific pseudo-registers */ +#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT) +#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \ + KVM_REG_ARM_FW | ((r) & 0xffff)) +#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0) + /* Device Control API: ARM VGIC */ #define KVM_DEV_ARM_VGIC_GRP_ADDR 0 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index a900befadfe8..e4a1182deff7 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -316,6 +316,7 @@ static const struct midr_range arm64_bp_harden_smccc_cpus[] = { MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1), MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR), + MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER), {}, }; diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 536d572e5596..9d1b06d67c53 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -868,6 
+868,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, static const struct midr_range kpti_safe_list[] = { MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), + { /* sentinel */ } }; char const *str = "command line option"; diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c index fa3637284a3d..f0690c2ca3e0 100644 --- a/arch/arm64/kernel/module-plts.c +++ b/arch/arm64/kernel/module-plts.c @@ -43,7 +43,7 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela, } #ifdef CONFIG_ARM64_ERRATUM_843419 -u64 module_emit_adrp_veneer(struct module *mod, void *loc, u64 val) +u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val) { struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core : &mod->arch.init; diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index 719fde8dcc19..155fd91e78f4 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -215,7 +215,7 @@ static int reloc_insn_adrp(struct module *mod, __le32 *place, u64 val) insn &= ~BIT(31); } else { /* out of range for ADR -> emit a veneer */ - val = module_emit_adrp_veneer(mod, place, val & ~0xfff); + val = module_emit_veneer_for_adrp(mod, place, val & ~0xfff); if (!val) return -ENOEXEC; insn = aarch64_insn_gen_branch_imm((u64)place, val, diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 71d99af24ef2..7ff81fed46e1 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -25,6 +25,7 @@ #include <linux/sched/signal.h> #include <linux/sched/task_stack.h> #include <linux/mm.h> +#include <linux/nospec.h> #include <linux/smp.h> #include <linux/ptrace.h> #include <linux/user.h> @@ -249,15 +250,20 @@ static struct perf_event *ptrace_hbp_get_event(unsigned int note_type, switch (note_type) { case NT_ARM_HW_BREAK: - if (idx < ARM_MAX_BRP) - bp = tsk->thread.debug.hbp_break[idx]; + if (idx >= ARM_MAX_BRP) + goto out; + idx = array_index_nospec(idx, ARM_MAX_BRP); + bp = tsk->thread.debug.hbp_break[idx]; break; case NT_ARM_HW_WATCH: - if (idx < ARM_MAX_WRP) - bp = tsk->thread.debug.hbp_watch[idx]; + if (idx >= ARM_MAX_WRP) + goto out; + idx = array_index_nospec(idx, ARM_MAX_WRP); + bp = tsk->thread.debug.hbp_watch[idx]; break; } +out: return bp; } @@ -1458,9 +1464,7 @@ static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num, { int ret; u32 kdata; - mm_segment_t old_fs = get_fs(); - set_fs(KERNEL_DS); /* Watchpoint */ if (num < 0) { ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata); @@ -1471,7 +1475,6 @@ static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num, } else { ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata); } - set_fs(old_fs); if (!ret) ret = put_user(kdata, data); @@ -1484,7 +1487,6 @@ static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num, { int ret; u32 kdata = 0; - mm_segment_t old_fs = get_fs(); if (num == 0) return 0; @@ -1493,12 +1495,10 @@ static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num, if (ret) return ret; - set_fs(KERNEL_DS); if (num < 0) ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata); else ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata); - set_fs(old_fs); return ret; } diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index ba964da31a25..8bbdc17e49df 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -277,7 +277,8 @@ void 
arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size) * If we were single stepping, we want to get the step exception after * we return from the trap. */ - user_fastforward_single_step(current); + if (user_mode(regs)) + user_fastforward_single_step(current); } static LIST_HEAD(undef_hook); @@ -366,7 +367,7 @@ void force_signal_inject(int signal, int code, unsigned long address) } /* Force signals we don't understand to SIGKILL */ - if (WARN_ON(signal != SIGKILL || + if (WARN_ON(signal != SIGKILL && siginfo_layout(signal, code) != SIL_FAULT)) { signal = SIGKILL; } diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 959e50d2588c..56a0260ceb11 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -25,6 +25,7 @@ #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/fs.h> +#include <kvm/arm_psci.h> #include <asm/cputype.h> #include <linux/uaccess.h> #include <asm/kvm.h> @@ -205,7 +206,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) { return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu) - + NUM_TIMER_REGS; + + kvm_arm_get_fw_num_regs(vcpu) + NUM_TIMER_REGS; } /** @@ -225,6 +226,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) uindices++; } + ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices); + if (ret) + return ret; + uindices += kvm_arm_get_fw_num_regs(vcpu); + ret = copy_timer_indices(vcpu, uindices); if (ret) return ret; @@ -243,6 +249,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) return get_core_reg(vcpu, reg); + if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW) + return kvm_arm_get_fw_reg(vcpu, reg); + if (is_timer_reg(reg->id)) return get_timer_reg(vcpu, reg); @@ -259,6 +268,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) return set_core_reg(vcpu, reg); + if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW) + return kvm_arm_set_fw_reg(vcpu, reg); + if (is_timer_reg(reg->id)) return set_timer_reg(vcpu, reg); diff --git a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c index 86801b6055d6..39be799d0417 100644 --- a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c +++ b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c @@ -18,11 +18,20 @@ #include <linux/compiler.h> #include <linux/irqchip/arm-gic.h> #include <linux/kvm_host.h> +#include <linux/swab.h> #include <asm/kvm_emulate.h> #include <asm/kvm_hyp.h> #include <asm/kvm_mmu.h> +static bool __hyp_text __is_be(struct kvm_vcpu *vcpu) +{ + if (vcpu_mode_is_32bit(vcpu)) + return !!(read_sysreg_el2(spsr) & COMPAT_PSR_E_BIT); + + return !!(read_sysreg(SCTLR_EL1) & SCTLR_ELx_EE); +} + /* * __vgic_v2_perform_cpuif_access -- perform a GICV access on behalf of the * guest. 
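[Editorial aside, not part of the patch] The hunk that follows uses the new __is_be() helper to decide whether the word moved between the guest register and the emulated GIC register needs byte-swapping: a big-endian guest has already swabbed the value, so the host undoes that before writel_relaxed() and swaps again after readl_relaxed(). A tiny illustration of that fixup; fixup_guest_word() is a hypothetical name and the compiler builtin stands in for the kernel's swab32():

/* Illustration only. */
#include <stdbool.h>
#include <stdint.h>

static uint32_t fixup_guest_word(uint32_t data, bool guest_is_big_endian)
{
        /* On a little-endian host, data from or for a big-endian guest must
         * be byte-reversed so the native MMIO accessor sees the value the
         * guest intended. */
        return guest_is_big_endian ? __builtin_bswap32(data) : data;
}

For a big-endian guest, 0x12345678 becomes 0x78563412 on the way to the device, and the read path applies the same swap symmetrically.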
@@ -64,14 +73,19 @@ int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu) addr += fault_ipa - vgic->vgic_cpu_base; if (kvm_vcpu_dabt_iswrite(vcpu)) { - u32 data = vcpu_data_guest_to_host(vcpu, - vcpu_get_reg(vcpu, rd), - sizeof(u32)); + u32 data = vcpu_get_reg(vcpu, rd); + if (__is_be(vcpu)) { + /* guest pre-swabbed data, undo this for writel() */ + data = swab32(data); + } writel_relaxed(data, addr); } else { u32 data = readl_relaxed(addr); - vcpu_set_reg(vcpu, rd, vcpu_data_host_to_guest(vcpu, data, - sizeof(u32))); + if (__is_be(vcpu)) { + /* guest expects swabbed data */ + data = swab32(data); + } + vcpu_set_reg(vcpu, rd, data); } return 1; diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 806b0b126a64..6e3b969391fd 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -996,14 +996,12 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz) if (id == SYS_ID_AA64PFR0_EL1) { if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT)) - pr_err_once("kvm [%i]: SVE unsupported for guests, suppressing\n", - task_pid_nr(current)); + kvm_debug("SVE unsupported for guests, suppressing\n"); val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT); } else if (id == SYS_ID_AA64MMFR1_EL1) { if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT)) - pr_err_once("kvm [%i]: LORegions unsupported for guests, suppressing\n", - task_pid_nr(current)); + kvm_debug("LORegions unsupported for guests, suppressing\n"); val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT); } diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index 0ead8a1d1679..137710f4dac3 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -19,5 +19,9 @@ CFLAGS_atomic_ll_sc.o := -fcall-used-x0 -ffixed-x1 -ffixed-x2 \ -fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15 \ -fcall-saved-x18 -fomit-frame-pointer CFLAGS_REMOVE_atomic_ll_sc.o := -pg +GCOV_PROFILE_atomic_ll_sc.o := n +KASAN_SANITIZE_atomic_ll_sc.o := n +KCOV_INSTRUMENT_atomic_ll_sc.o := n +UBSAN_SANITIZE_atomic_ll_sc.o := n lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c index e36ed5087b5c..1059884f9a6f 100644 --- a/arch/arm64/mm/flush.c +++ b/arch/arm64/mm/flush.c @@ -58,7 +58,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page, flush_ptrace_access(vma, page, uaddr, dst, len); } -void __sync_icache_dcache(pte_t pte, unsigned long addr) +void __sync_icache_dcache(pte_t pte) { struct page *page = pte_page(pte); diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 9f3c47acf8ff..1b18b4722420 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -646,8 +646,10 @@ static int keep_initrd __initdata; void __init free_initrd_mem(unsigned long start, unsigned long end) { - if (!keep_initrd) + if (!keep_initrd) { free_reserved_area((void *)start, (void *)end, 0, "initrd"); + memblock_free(__virt_to_phys(start), end - start); + } } static int __init keepinitrd_setup(char *__unused) diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index dabfc1ecda3d..12145874c02b 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -204,7 +204,7 @@ void __init kasan_init(void) clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); kasan_map_populate(kimg_shadow_start, kimg_shadow_end, - pfn_to_nid(virt_to_pfn(lm_alias(_text)))); + early_pfn_to_nid(virt_to_pfn(lm_alias(_text)))); kasan_populate_zero_shadow((void *)KASAN_SHADOW_START, (void *)mod_shadow_start); @@ -224,7 +224,7 @@ void __init kasan_init(void) 
kasan_map_populate((unsigned long)kasan_mem_to_shadow(start), (unsigned long)kasan_mem_to_shadow(end), - pfn_to_nid(virt_to_pfn(start))); + early_pfn_to_nid(virt_to_pfn(start))); } /* diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h index 9e8621d94ee9..e17262ad125e 100644 --- a/arch/hexagon/include/asm/io.h +++ b/arch/hexagon/include/asm/io.h @@ -216,6 +216,12 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, memcpy((void *) dst, src, count); } +static inline void memset_io(volatile void __iomem *addr, int value, + size_t size) +{ + memset((void __force *)addr, value, size); +} + #define PCI_IO_ADDR (volatile void __iomem *) /* diff --git a/arch/hexagon/lib/checksum.c b/arch/hexagon/lib/checksum.c index 617506d1a559..7cd0a2259269 100644 --- a/arch/hexagon/lib/checksum.c +++ b/arch/hexagon/lib/checksum.c @@ -199,3 +199,4 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) memcpy(dst, src, len); return csum_partial(dst, len, sum); } +EXPORT_SYMBOL(csum_partial_copy_nocheck); diff --git a/arch/mips/boot/dts/img/boston.dts b/arch/mips/boot/dts/img/boston.dts index 1bd105428f61..65af3f6ba81c 100644 --- a/arch/mips/boot/dts/img/boston.dts +++ b/arch/mips/boot/dts/img/boston.dts @@ -51,6 +51,8 @@ ranges = <0x02000000 0 0x40000000 0x40000000 0 0x40000000>; + bus-range = <0x00 0xff>; + interrupt-map-mask = <0 0 0 7>; interrupt-map = <0 0 0 1 &pci0_intc 1>, <0 0 0 2 &pci0_intc 2>, @@ -79,6 +81,8 @@ ranges = <0x02000000 0 0x20000000 0x20000000 0 0x20000000>; + bus-range = <0x00 0xff>; + interrupt-map-mask = <0 0 0 7>; interrupt-map = <0 0 0 1 &pci1_intc 1>, <0 0 0 2 &pci1_intc 2>, @@ -107,6 +111,8 @@ ranges = <0x02000000 0 0x16000000 0x16000000 0 0x100000>; + bus-range = <0x00 0xff>; + interrupt-map-mask = <0 0 0 7>; interrupt-map = <0 0 0 1 &pci2_intc 1>, <0 0 0 2 &pci2_intc 2>, diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index 0cbf3af37eca..a7d0b836f2f7 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h @@ -307,7 +307,7 @@ static inline void iounmap(const volatile void __iomem *addr) #if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_LOONGSON3_ENHANCEMENT) #define war_io_reorder_wmb() wmb() #else -#define war_io_reorder_wmb() do { } while (0) +#define war_io_reorder_wmb() barrier() #endif #define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \ @@ -377,6 +377,8 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \ BUG(); \ } \ \ + /* prevent prefetching of coherent DMA data prematurely */ \ + rmb(); \ return pfx##ioswab##bwlq(__mem, __val); \ } diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index b71306947290..06629011a434 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -654,6 +654,13 @@ __clear_user(void __user *addr, __kernel_size_t size) { __kernel_size_t res; +#ifdef CONFIG_CPU_MICROMIPS +/* micromips memset / bzero also clobbers t7 & t8 */ +#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31" +#else +#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31" +#endif /* CONFIG_CPU_MICROMIPS */ + if (eva_kernel_access()) { __asm__ __volatile__( "move\t$4, %1\n\t" @@ -663,7 +670,7 @@ __clear_user(void __user *addr, __kernel_size_t size) "move\t%0, $6" : "=r" (res) : "r" (addr), "r" (size) - : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"); + : bzero_clobbers); } else { might_fault(); __asm__ __volatile__( @@ -674,7 +681,7 @@ __clear_user(void __user 
*addr, __kernel_size_t size) "move\t%0, $6" : "=r" (res) : "r" (addr), "r" (size) - : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"); + : bzero_clobbers); } return res; diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S index a1456664d6c2..f7327979a8f8 100644 --- a/arch/mips/lib/memset.S +++ b/arch/mips/lib/memset.S @@ -219,7 +219,7 @@ 1: PTR_ADDIU a0, 1 /* fill bytewise */ R10KCBARRIER(0(ra)) bne t1, a0, 1b - sb a1, -1(a0) + EX(sb, a1, -1(a0), .Lsmall_fixup\@) 2: jr ra /* done */ move a2, zero @@ -252,13 +252,18 @@ PTR_L t0, TI_TASK($28) andi a2, STORMASK LONG_L t0, THREAD_BUADDR(t0) - LONG_ADDU a2, t1 + LONG_ADDU a2, a0 jr ra LONG_SUBU a2, t0 .Llast_fixup\@: jr ra - andi v1, a2, STORMASK + nop + +.Lsmall_fixup\@: + PTR_SUBU a2, t1, a0 + jr ra + PTR_ADDIU a2, 1 .endm diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index e2364ff59180..34ac503e28ad 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -123,6 +123,9 @@ INSTALL_TARGETS = zinstall install PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS) +# Default kernel to build +all: bzImage + zImage: vmlinuz Image: vmlinux diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile index eafd06ab59ef..e5de34d00b1a 100644 --- a/arch/parisc/kernel/Makefile +++ b/arch/parisc/kernel/Makefile @@ -23,7 +23,7 @@ obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_PA11) += pci-dma.o obj-$(CONFIG_PCI) += pci.o obj-$(CONFIG_MODULES) += module.o -obj-$(CONFIG_64BIT) += binfmt_elf32.o sys_parisc32.o signal32.o +obj-$(CONFIG_64BIT) += sys_parisc32.o signal32.o obj-$(CONFIG_STACKTRACE)+= stacktrace.o obj-$(CONFIG_AUDIT) += audit.o obj64-$(CONFIG_AUDIT) += compat_audit.o diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index 3b8507f71050..ee5a78a151a6 100644 --- a/arch/parisc/kernel/drivers.c +++ b/arch/parisc/kernel/drivers.c @@ -448,7 +448,8 @@ static int match_by_id(struct device * dev, void * data) * Checks all the children of @parent for a matching @id. If none * found, it allocates a new device and returns it. */ -static struct parisc_device * alloc_tree_node(struct device *parent, char id) +static struct parisc_device * __init alloc_tree_node( + struct device *parent, char id) { struct match_id_data d = { .id = id, @@ -825,8 +826,8 @@ static void walk_lower_bus(struct parisc_device *dev) * devices which are not physically connected (such as extra serial & * keyboard ports). This problem is not yet solved. 
*/ -static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high, - struct device *parent) +static void __init walk_native_bus(unsigned long io_io_low, + unsigned long io_io_high, struct device *parent) { int i, devices_found = 0; unsigned long hpa = io_io_low; diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c index 13ee3569959a..ae684ac6efb6 100644 --- a/arch/parisc/kernel/pci.c +++ b/arch/parisc/kernel/pci.c @@ -174,7 +174,7 @@ void pcibios_set_master(struct pci_dev *dev) * pcibios_init_bridge() initializes cache line and default latency * for pci controllers and pci-pci bridges */ -void __init pcibios_init_bridge(struct pci_dev *dev) +void __ref pcibios_init_bridge(struct pci_dev *dev) { unsigned short bridge_ctl, bridge_ctl_new; diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index c3830400ca28..a1e772f909cb 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c @@ -205,7 +205,7 @@ static int __init rtc_init(void) device_initcall(rtc_init); #endif -void read_persistent_clock(struct timespec *ts) +void read_persistent_clock64(struct timespec64 *ts) { static struct pdc_tod tod_data; if (pdc_tod_read(&tod_data) == 0) { diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 68e671a11987..71d31274d782 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -837,6 +837,17 @@ void __init initialize_ivt(const void *iva) if (pdc_instr(&instr) == PDC_OK) ivap[0] = instr; + /* + * Rules for the checksum of the HPMC handler: + * 1. The IVA does not point to PDC/PDH space (ie: the OS has installed + * its own IVA). + * 2. The word at IVA + 32 is nonzero. + * 3. If Length (IVA + 60) is not zero, then Length (IVA + 60) and + * Address (IVA + 56) are word-aligned. + * 4. The checksum of the 8 words starting at IVA + 32 plus the sum of + * the Length/4 words starting at Address is zero. + */ + /* Compute Checksum for HPMC handler */ length = os_hpmc_size; ivap[7] = length; diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index cab32ee824d2..2607d2d33405 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -516,7 +516,7 @@ static void __init map_pages(unsigned long start_vaddr, } } -void free_initmem(void) +void __ref free_initmem(void) { unsigned long init_begin = (unsigned long)__init_begin; unsigned long init_end = (unsigned long)__init_end; diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h index 9abddde372ab..b2dabd06659d 100644 --- a/arch/powerpc/include/asm/ftrace.h +++ b/arch/powerpc/include/asm/ftrace.h @@ -69,17 +69,30 @@ struct dyn_arch_ftrace { #endif #if defined(CONFIG_FTRACE_SYSCALLS) && !defined(__ASSEMBLY__) -#ifdef PPC64_ELF_ABI_v1 +/* + * Some syscall entry functions on powerpc start with "ppc_" (fork and clone, + * for instance) or ppc32_/ppc64_. We should also match the sys_ variant with + * those. 
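[Editorial aside, not part of the patch] The symbol-name matching described in this comment can be tried standalone; the sketch below mirrors an abbreviated form of the non-ABIv1 variant from the hunk that follows (the ppc32_/ppc64_ cases are analogous), with match_sym_name() as a hypothetical wrapper name:

/* Illustration only; mirrors the strcmp/strncmp chain added below. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool match_sym_name(const char *sym, const char *name)
{
        return !strcmp(sym, name) ||
               (!strncmp(sym, "__se_sys", 8) && !strcmp(sym + 5, name)) ||
               (!strncmp(sym, "ppc_", 4) && !strcmp(sym + 4, name + 4));
}

int main(void)
{
        /* Both tails are "fork", so the ftrace metadata for sys_fork is
         * matched against the ppc_fork entry point as well. */
        printf("%d\n", match_sym_name("ppc_fork", "sys_fork"));       /* 1 */
        printf("%d\n", match_sym_name("__se_sys_fork", "sys_fork"));  /* 1 */
        printf("%d\n", match_sym_name("sys_read", "sys_fork"));       /* 0 */
        return 0;
}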
+ */ #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME +#ifdef PPC64_ELF_ABI_v1 +static inline bool arch_syscall_match_sym_name(const char *sym, const char *name) +{ + /* We need to skip past the initial dot, and the __se_sys alias */ + return !strcmp(sym + 1, name) || + (!strncmp(sym, ".__se_sys", 9) && !strcmp(sym + 6, name)) || + (!strncmp(sym, ".ppc_", 5) && !strcmp(sym + 5, name + 4)) || + (!strncmp(sym, ".ppc32_", 7) && !strcmp(sym + 7, name + 4)) || + (!strncmp(sym, ".ppc64_", 7) && !strcmp(sym + 7, name + 4)); +} +#else static inline bool arch_syscall_match_sym_name(const char *sym, const char *name) { - /* - * Compare the symbol name with the system call name. Skip the .sys or .SyS - * prefix from the symbol name and the sys prefix from the system call name and - * just match the rest. This is only needed on ppc64 since symbol names on - * 32bit do not start with a period so the generic function will work. - */ - return !strcmp(sym + 4, name + 3); + return !strcmp(sym, name) || + (!strncmp(sym, "__se_sys", 8) && !strcmp(sym + 5, name)) || + (!strncmp(sym, "ppc_", 4) && !strcmp(sym + 4, name + 4)) || + (!strncmp(sym, "ppc32_", 6) && !strcmp(sym + 6, name + 4)) || + (!strncmp(sym, "ppc64_", 6) && !strcmp(sym + 6, name + 4)); } #endif #endif /* CONFIG_FTRACE_SYSCALLS && !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 4185f1c96125..3f109a3e3edb 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -165,7 +165,6 @@ struct paca_struct { u64 saved_msr; /* MSR saved here by enter_rtas */ u16 trap_save; /* Used when bad stack is encountered */ u8 irq_soft_mask; /* mask for irq soft masking */ - u8 soft_enabled; /* irq soft-enable flag */ u8 irq_happened; /* irq happened while soft-disabled */ u8 io_sync; /* writel() needs spin_unlock sync */ u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */ diff --git a/arch/powerpc/include/asm/powernv.h b/arch/powerpc/include/asm/powernv.h index d1c2d2e658cf..2f3ff7a27881 100644 --- a/arch/powerpc/include/asm/powernv.h +++ b/arch/powerpc/include/asm/powernv.h @@ -15,7 +15,7 @@ extern void powernv_set_nmmu_ptcr(unsigned long ptcr); extern struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, unsigned long flags, - struct npu_context *(*cb)(struct npu_context *, void *), + void (*cb)(struct npu_context *, void *), void *priv); extern void pnv_npu2_destroy_context(struct npu_context *context, struct pci_dev *gpdev); diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index 9f421641a35c..16b077801a5f 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h @@ -91,6 +91,7 @@ extern int start_topology_update(void); extern int stop_topology_update(void); extern int prrn_is_enabled(void); extern int find_and_online_cpu_nid(int cpu); +extern int timed_topology_update(int nsecs); #else static inline int start_topology_update(void) { @@ -108,16 +109,12 @@ static inline int find_and_online_cpu_nid(int cpu) { return 0; } +static inline int timed_topology_update(int nsecs) +{ + return 0; +} #endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */ -#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_NEED_MULTIPLE_NODES) -#if defined(CONFIG_PPC_SPLPAR) -extern int timed_topology_update(int nsecs); -#else -#define timed_topology_update(nsecs) -#endif /* CONFIG_PPC_SPLPAR */ -#endif /* CONFIG_HOTPLUG_CPU || CONFIG_NEED_MULTIPLE_NODES */ - #include <asm-generic/topology.h> #ifdef CONFIG_SMP diff --git 
a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index 2d4956e97aa9..ee5a67d57aab 100644 --- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c @@ -807,7 +807,8 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev) eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]); /* PCI Command: 0x4 */ - eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]); + eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1] | + PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); /* Check the PCIe link is ready */ eeh_bridge_check_link(edev); diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index 79d005445c6c..e734f6e45abc 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S @@ -553,12 +553,12 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300) #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE lbz r0,HSTATE_HWTHREAD_STATE(r13) cmpwi r0,KVM_HWTHREAD_IN_KERNEL - beq 1f + beq 0f li r0,KVM_HWTHREAD_IN_KERNEL stb r0,HSTATE_HWTHREAD_STATE(r13) /* Order setting hwthread_state vs. testing hwthread_req */ sync - lbz r0,HSTATE_HWTHREAD_REQ(r13) +0: lbz r0,HSTATE_HWTHREAD_REQ(r13) cmpwi r0,0 beq 1f b kvm_start_guest diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c index fe6fc63251fe..38c5b4764bfe 100644 --- a/arch/powerpc/kernel/mce_power.c +++ b/arch/powerpc/kernel/mce_power.c @@ -441,7 +441,6 @@ static int mce_handle_ierror(struct pt_regs *regs, if (pfn != ULONG_MAX) { *phys_addr = (pfn << PAGE_SHIFT); - handled = 1; } } } @@ -532,9 +531,7 @@ static int mce_handle_derror(struct pt_regs *regs, * kernel/exception-64s.h */ if (get_paca()->in_mce < MAX_MCE_DEPTH) - if (!mce_find_instr_ea_and_pfn(regs, addr, - phys_addr)) - handled = 1; + mce_find_instr_ea_and_pfn(regs, addr, phys_addr); } found = 1; } @@ -572,7 +569,7 @@ static long mce_handle_error(struct pt_regs *regs, const struct mce_ierror_table itable[]) { struct mce_error_info mce_err = { 0 }; - uint64_t addr, phys_addr; + uint64_t addr, phys_addr = ULONG_MAX; uint64_t srr1 = regs->msr; long handled; diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 44c30dd38067..b78f142a4148 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -890,6 +890,17 @@ static void __ref init_fallback_flush(void) return; l1d_size = ppc64_caches.l1d.size; + + /* + * If there is no d-cache-size property in the device tree, l1d_size + * could be zero. That leads to the loop in the asm wrapping around to + * 2^64-1, and then walking off the end of the fallback area and + * eventually causing a page fault which is fatal. Just default to + * something vaguely sane. + */ + if (!l1d_size) + l1d_size = (64 * 1024); + limit = min(ppc64_bolted_size(), ppc64_rma_size); /* diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index e16ec7b3b427..9ca7148b5881 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -566,10 +566,35 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)) #endif #ifdef CONFIG_NMI_IPI -static void stop_this_cpu(struct pt_regs *regs) -#else +static void nmi_stop_this_cpu(struct pt_regs *regs) +{ + /* + * This is a special case because it never returns, so the NMI IPI + * handling would never mark it as done, which makes any later + * smp_send_nmi_ipi() call spin forever. Mark it done now. + * + * IRQs are already hard disabled by the smp_handle_nmi_ipi. 
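[Editorial aside, not part of the patch] Further down in this smp.c hunk, the non-NMI smp_send_stop() gains a static "stopped" flag so a repeated call does not wait on cross-CPU call state left behind by the first; the patch's own comment accepts the small race. A hypothetical, self-contained sketch of that call-once guard (send_stop_once() and broadcast_stop() are made-up names):

/* Illustration only; not the kernel's implementation. */
#include <stdbool.h>
#include <stdio.h>

static void broadcast_stop(void)
{
        puts("stop request sent to the other CPUs");    /* stand-in for the IPI */
}

void send_stop_once(void)
{
        static bool stopped;

        /* Deliberately lock-free on this emergency path: as in the patch,
         * callers are expected to issue only one stop, so the guard only
         * needs to catch the common repeat call. */
        if (stopped)
                return;
        stopped = true;

        broadcast_stop();
}

int main(void)
{
        send_stop_once();
        send_stop_once();       /* returns immediately the second time */
        return 0;
}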
+ */ + nmi_ipi_lock(); + nmi_ipi_busy_count--; + nmi_ipi_unlock(); + + /* Remove this CPU */ + set_cpu_online(smp_processor_id(), false); + + spin_begin(); + while (1) + spin_cpu_relax(); +} + +void smp_send_stop(void) +{ + smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000); +} + +#else /* CONFIG_NMI_IPI */ + static void stop_this_cpu(void *dummy) -#endif { /* Remove this CPU */ set_cpu_online(smp_processor_id(), false); @@ -582,12 +607,22 @@ static void stop_this_cpu(void *dummy) void smp_send_stop(void) { -#ifdef CONFIG_NMI_IPI - smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, stop_this_cpu, 1000000); -#else + static bool stopped = false; + + /* + * Prevent waiting on csd lock from a previous smp_send_stop. + * This is racy, but in general callers try to do the right + * thing and only fire off one smp_send_stop (e.g., see + * kernel/panic.c) + */ + if (stopped) + return; + + stopped = true; + smp_call_function(stop_this_cpu, NULL, 0); -#endif } +#endif /* CONFIG_NMI_IPI */ struct thread_info *current_set[NR_CPUS]; diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 6038e2e7aee0..876d4f294fdd 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -305,6 +305,13 @@ void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu) kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL); } +#ifdef CONFIG_ALTIVEC +void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu) +{ + kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL); +} +#endif + void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu) { kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER); diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 35f80ab7cbd8..288fe4f0db4e 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -55,7 +55,7 @@ static int patch_alt_instruction(unsigned int *src, unsigned int *dest, unsigned int *target = (unsigned int *)branch_target(src); /* Branch within the section doesn't need translating */ - if (target < alt_start || target >= alt_end) { + if (target < alt_start || target > alt_end) { instr = translate_branch(dest, src); if (!instr) return 1; diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 737f8a4632cc..c3c39b02b2ba 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -133,6 +133,7 @@ int __meminit arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap * start, start + size, rc); return -EFAULT; } + flush_inval_dcache_range(start, start + size); return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock); } @@ -159,6 +160,7 @@ int __meminit arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap /* Remove htab bolted mappings for this section of memory */ start = (unsigned long)__va(start); + flush_inval_dcache_range(start, start + size); ret = remove_section_mapping(start, start + size); /* Ensure all vmalloc mappings are flushed in case they also diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 9033c8194eda..ccc421503363 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -1093,7 +1093,7 @@ static int show_spu_loadavg(struct seq_file *s, void *private) LOAD_INT(c), LOAD_FRAC(c), count_active_contexts(), atomic_read(&nr_spu_contexts), - idr_get_cursor(&task_active_pid_ns(current)->idr)); + idr_get_cursor(&task_active_pid_ns(current)->idr) - 1); return 0; } diff --git a/arch/powerpc/platforms/powernv/memtrace.c 
b/arch/powerpc/platforms/powernv/memtrace.c index de470caf0784..fc222a0c2ac4 100644 --- a/arch/powerpc/platforms/powernv/memtrace.c +++ b/arch/powerpc/platforms/powernv/memtrace.c @@ -82,19 +82,6 @@ static const struct file_operations memtrace_fops = { .open = simple_open, }; -static void flush_memory_region(u64 base, u64 size) -{ - unsigned long line_size = ppc64_caches.l1d.size; - u64 end = base + size; - u64 addr; - - base = round_down(base, line_size); - end = round_up(end, line_size); - - for (addr = base; addr < end; addr += line_size) - asm volatile("dcbf 0,%0" : "=r" (addr) :: "memory"); -} - static int check_memblock_online(struct memory_block *mem, void *arg) { if (mem->state != MEM_ONLINE) @@ -132,10 +119,6 @@ static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages) walk_memory_range(start_pfn, end_pfn, (void *)MEM_OFFLINE, change_memblock_state); - /* RCU grace period? */ - flush_memory_region((u64)__va(start_pfn << PAGE_SHIFT), - nr_pages << PAGE_SHIFT); - lock_device_hotplug(); remove_memory(nid, start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT); unlock_device_hotplug(); diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index 69a4f9e8bd55..525e966dce34 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c @@ -34,6 +34,19 @@ #define npu_to_phb(x) container_of(x, struct pnv_phb, npu) /* + * spinlock to protect initialisation of an npu_context for a particular + * mm_struct. + */ +static DEFINE_SPINLOCK(npu_context_lock); + +/* + * When an address shootdown range exceeds this threshold we invalidate the + * entire TLB on the GPU for the given PID rather than each specific address in + * the range. + */ +#define ATSD_THRESHOLD (2*1024*1024) + +/* * Other types of TCE cache invalidation are not functional in the * hardware. */ @@ -401,7 +414,7 @@ struct npu_context { bool nmmu_flush; /* Callback to stop translation requests on a given GPU */ - struct npu_context *(*release_cb)(struct npu_context *, void *); + void (*release_cb)(struct npu_context *context, void *priv); /* * Private pointer passed to the above callback for usage by @@ -671,11 +684,19 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn, struct npu_context *npu_context = mn_to_npu_context(mn); unsigned long address; - for (address = start; address < end; address += PAGE_SIZE) - mmio_invalidate(npu_context, 1, address, false); + if (end - start > ATSD_THRESHOLD) { + /* + * Just invalidate the entire PID if the address range is too + * large. + */ + mmio_invalidate(npu_context, 0, 0, true); + } else { + for (address = start; address < end; address += PAGE_SIZE) + mmio_invalidate(npu_context, 1, address, false); - /* Do the flush only on the final addess == end */ - mmio_invalidate(npu_context, 1, address, true); + /* Do the flush only on the final addess == end */ + mmio_invalidate(npu_context, 1, address, true); + } } static const struct mmu_notifier_ops nv_nmmu_notifier_ops = { @@ -696,11 +717,12 @@ static const struct mmu_notifier_ops nv_nmmu_notifier_ops = { * Returns an error if there no contexts are currently available or a * npu_context which should be passed to pnv_npu2_handle_fault(). * - * mmap_sem must be held in write mode. + * mmap_sem must be held in write mode and must not be called from interrupt + * context. 
*/ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, unsigned long flags, - struct npu_context *(*cb)(struct npu_context *, void *), + void (*cb)(struct npu_context *, void *), void *priv) { int rc; @@ -743,7 +765,9 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, /* * Setup the NPU context table for a particular GPU. These need to be * per-GPU as we need the tables to filter ATSDs when there are no - * active contexts on a particular GPU. + * active contexts on a particular GPU. It is safe for these to be + * called concurrently with destroy as the OPAL call takes appropriate + * locks and refcounts on init/destroy. */ rc = opal_npu_init_context(nphb->opal_id, mm->context.id, flags, PCI_DEVID(gpdev->bus->number, gpdev->devfn)); @@ -754,8 +778,29 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, * We store the npu pci device so we can more easily get at the * associated npus. */ + spin_lock(&npu_context_lock); npu_context = mm->context.npu_context; + if (npu_context) { + if (npu_context->release_cb != cb || + npu_context->priv != priv) { + spin_unlock(&npu_context_lock); + opal_npu_destroy_context(nphb->opal_id, mm->context.id, + PCI_DEVID(gpdev->bus->number, + gpdev->devfn)); + return ERR_PTR(-EINVAL); + } + + WARN_ON(!kref_get_unless_zero(&npu_context->kref)); + } + spin_unlock(&npu_context_lock); + if (!npu_context) { + /* + * We can set up these fields without holding the + * npu_context_lock as the npu_context hasn't been returned to + * the caller meaning it can't be destroyed. Parallel allocation + * is protected against by mmap_sem. + */ rc = -ENOMEM; npu_context = kzalloc(sizeof(struct npu_context), GFP_KERNEL); if (npu_context) { @@ -774,8 +819,6 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, } mm->context.npu_context = npu_context; - } else { - WARN_ON(!kref_get_unless_zero(&npu_context->kref)); } npu_context->release_cb = cb; @@ -814,15 +857,16 @@ static void pnv_npu2_release_context(struct kref *kref) mm_context_remove_copro(npu_context->mm); npu_context->mm->context.npu_context = NULL; - mmu_notifier_unregister(&npu_context->mn, - npu_context->mm); - - kfree(npu_context); } +/* + * Destroy a context on the given GPU. May free the npu_context if it is no + * longer active on any GPUs. Must not be called from interrupt context. + */ void pnv_npu2_destroy_context(struct npu_context *npu_context, struct pci_dev *gpdev) { + int removed; struct pnv_phb *nphb; struct npu *npu; struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0); @@ -844,7 +888,21 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context, WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], NULL); opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id, PCI_DEVID(gpdev->bus->number, gpdev->devfn)); - kref_put(&npu_context->kref, pnv_npu2_release_context); + spin_lock(&npu_context_lock); + removed = kref_put(&npu_context->kref, pnv_npu2_release_context); + spin_unlock(&npu_context_lock); + + /* + * We need to do this outside of pnv_npu2_release_context so that it is + * outside the spinlock as mmu_notifier_destroy uses SRCU. 
+ */ + if (removed) { + mmu_notifier_unregister(&npu_context->mn, + npu_context->mm); + + kfree(npu_context); + } + } EXPORT_SYMBOL(pnv_npu2_destroy_context); diff --git a/arch/powerpc/platforms/powernv/opal-rtc.c b/arch/powerpc/platforms/powernv/opal-rtc.c index f8868864f373..aa2a5139462e 100644 --- a/arch/powerpc/platforms/powernv/opal-rtc.c +++ b/arch/powerpc/platforms/powernv/opal-rtc.c @@ -48,10 +48,12 @@ unsigned long __init opal_get_boot_time(void) while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms); - if (rc == OPAL_BUSY_EVENT) + if (rc == OPAL_BUSY_EVENT) { + mdelay(OPAL_BUSY_DELAY_MS); opal_poll_events(NULL); - else if (rc == OPAL_BUSY) - mdelay(10); + } else if (rc == OPAL_BUSY) { + mdelay(OPAL_BUSY_DELAY_MS); + } } if (rc != OPAL_SUCCESS) return 0; diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index d22aeb0b69e1..b48454be5b98 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c @@ -389,6 +389,10 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc) if (xive_pool_vps == XIVE_INVALID_VP) return; + /* Check if pool VP already active, if it is, pull it */ + if (in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2) & TM_QW2W2_VP) + in_be64(xive_tima + TM_SPC_PULL_POOL_CTX); + /* Enable the pool VP */ vp = xive_pool_vps + cpu; pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp); diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 23d8acca5c90..cd4fd85fde84 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -11,6 +11,7 @@ config RISCV select ARCH_WANT_FRAME_POINTERS select CLONE_BACKWARDS select COMMON_CLK + select DMA_DIRECT_OPS select GENERIC_CLOCKEVENTS select GENERIC_CPU_DEVICES select GENERIC_IRQ_SHOW @@ -89,9 +90,6 @@ config PGTABLE_LEVELS config HAVE_KPROBES def_bool n -config DMA_DIRECT_OPS - def_bool y - menu "Platform type" choice diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild index 1e5fd280fb4d..4286a5f83876 100644 --- a/arch/riscv/include/asm/Kbuild +++ b/arch/riscv/include/asm/Kbuild @@ -15,7 +15,6 @@ generic-y += fcntl.h generic-y += futex.h generic-y += hardirq.h generic-y += hash.h -generic-y += handle_irq.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ioctls.h diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile index 324568d33921..f6561b783b61 100644 --- a/arch/riscv/kernel/vdso/Makefile +++ b/arch/riscv/kernel/vdso/Makefile @@ -52,7 +52,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE # Add -lgcc so rv32 gets static muldi3 and lshrdi3 definitions. # Make sure only to export the intended __vdso_xxx symbol offsets. 
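A general note on the pnv_npu2_destroy_context() rework just above: the reference is dropped under npu_context_lock, but mmu_notifier_unregister() and kfree() are deferred until the lock is released, because notifier teardown uses SRCU and may sleep. Below is a minimal sketch of that shape with hypothetical names (struct my_obj, my_lock, my_obj_release and friends), purely to illustrate the locking pattern rather than the driver code itself.

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_obj {
	struct kref kref;
	/* ... payload ... */
};

static DEFINE_SPINLOCK(my_lock);

/* Called with my_lock held: only detach the object, no sleeping calls. */
static void my_obj_release(struct kref *kref)
{
	/* e.g. clear the pointer that the lookup path below checks */
}

/* Lookup path: only take a reference on objects that are still live. */
static struct my_obj *my_obj_tryget(struct my_obj *obj)
{
	struct my_obj *live = NULL;

	spin_lock(&my_lock);
	if (obj && kref_get_unless_zero(&obj->kref))
		live = obj;
	spin_unlock(&my_lock);

	return live;
}

static void my_obj_put(struct my_obj *obj)
{
	int removed;

	spin_lock(&my_lock);
	removed = kref_put(&obj->kref, my_obj_release);
	spin_unlock(&my_lock);

	/*
	 * Sleeping teardown (the analogue of mmu_notifier_unregister())
	 * runs only once the spinlock has been dropped.
	 */
	if (removed) {
		/* sleeping cleanup goes here */
		kfree(obj);
	}
}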
quiet_cmd_vdsold = VDSOLD $@ - cmd_vdsold = $(CC) $(KCFLAGS) -nostdlib $(SYSCFLAGS_$(@F)) \ + cmd_vdsold = $(CC) $(KCFLAGS) $(call cc-option, -no-pie) -nostdlib $(SYSCFLAGS_$(@F)) \ -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp -lgcc && \ $(CROSS_COMPILE)objcopy \ $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ diff --git a/arch/s390/Kbuild b/arch/s390/Kbuild index 9fdff3fe1a42..e63940bb57cd 100644 --- a/arch/s390/Kbuild +++ b/arch/s390/Kbuild @@ -8,3 +8,4 @@ obj-$(CONFIG_APPLDATA_BASE) += appldata/ obj-y += net/ obj-$(CONFIG_PCI) += pci/ obj-$(CONFIG_NUMA) += numa/ +obj-$(CONFIG_ARCH_HAS_KEXEC_PURGATORY) += purgatory/ diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 32a0d5b958bf..199ac3e4da1d 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -47,10 +47,6 @@ config PGSTE config ARCH_SUPPORTS_DEBUG_PAGEALLOC def_bool y -config KEXEC - def_bool y - select KEXEC_CORE - config AUDIT_ARCH def_bool y @@ -290,12 +286,12 @@ config MARCH_Z13 older machines. config MARCH_Z14 - bool "IBM z14" + bool "IBM z14 ZR1 and z14" select HAVE_MARCH_Z14_FEATURES help - Select this to enable optimizations for IBM z14 (3906 series). - The kernel will be slightly faster but will not work on older - machines. + Select this to enable optimizations for IBM z14 ZR1 and z14 (3907 + and 3906 series). The kernel will be slightly faster but will not + work on older machines. endchoice @@ -525,6 +521,26 @@ source kernel/Kconfig.preempt source kernel/Kconfig.hz +config KEXEC + def_bool y + select KEXEC_CORE + +config KEXEC_FILE + bool "kexec file based system call" + select KEXEC_CORE + select BUILD_BIN2C + depends on CRYPTO + depends on CRYPTO_SHA256 + depends on CRYPTO_SHA256_S390 + help + Enable the kexec file based system call. In contrast to the normal + kexec system call this system call takes file descriptors for the + kernel and initramfs as arguments. + +config ARCH_HAS_KEXEC_PURGATORY + def_bool y + depends on KEXEC_FILE + config ARCH_RANDOM def_bool y prompt "s390 architectural random number generation API" diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile index da9dad35c28e..d1fa37fcce83 100644 --- a/arch/s390/boot/Makefile +++ b/arch/s390/boot/Makefile @@ -3,12 +3,6 @@ # Makefile for the linux s390-specific parts of the memory manager. # -COMPILE_VERSION := __linux_compile_version_id__`hostname | \ - tr -c '[0-9A-Za-z]' '_'`__`date | \ - tr -c '[0-9A-Za-z]' '_'`_t - -ccflags-y := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I. 
- targets := image targets += bzImage subdir- := compressed diff --git a/arch/s390/boot/compressed/.gitignore b/arch/s390/boot/compressed/.gitignore index ae06b9b4c02f..2088cc140629 100644 --- a/arch/s390/boot/compressed/.gitignore +++ b/arch/s390/boot/compressed/.gitignore @@ -1,3 +1,4 @@ sizes.h vmlinux vmlinux.lds +vmlinux.bin.full diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/debug_defconfig index 5af8458951cf..6176fe9795ca 100644 --- a/arch/s390/configs/default_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -24,13 +24,13 @@ CONFIG_CPUSETS=y CONFIG_CGROUP_DEVICE=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y -CONFIG_CHECKPOINT_RESTORE=y CONFIG_NAMESPACES=y CONFIG_USER_NS=y CONFIG_SCHED_AUTOGROUP=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y # CONFIG_SYSFS_SYSCALL is not set +CONFIG_CHECKPOINT_RESTORE=y CONFIG_BPF_SYSCALL=y CONFIG_USERFAULTFD=y # CONFIG_COMPAT_BRK is not set @@ -59,10 +59,11 @@ CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_DEFAULT_DEADLINE=y CONFIG_LIVEPATCH=y CONFIG_TUNE_ZEC12=y -CONFIG_NR_CPUS=256 +CONFIG_NR_CPUS=512 CONFIG_NUMA=y CONFIG_PREEMPT=y CONFIG_HZ_100=y +CONFIG_KEXEC_FILE=y CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTREMOVE=y CONFIG_KSM=y @@ -305,7 +306,6 @@ CONFIG_IP6_NF_SECURITY=m CONFIG_IP6_NF_NAT=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_NF_TABLES_BRIDGE=m -CONFIG_NET_SCTPPROBE=m CONFIG_RDS=m CONFIG_RDS_RDMA=m CONFIG_RDS_TCP=m @@ -364,11 +364,11 @@ CONFIG_NET_ACT_SIMP=m CONFIG_NET_ACT_SKBEDIT=m CONFIG_NET_ACT_CSUM=m CONFIG_DNS_RESOLVER=y +CONFIG_OPENVSWITCH=m CONFIG_NETLINK_DIAG=m CONFIG_CGROUP_NET_PRIO=y CONFIG_BPF_JIT=y CONFIG_NET_PKTGEN=m -CONFIG_NET_TCPPROBE=m CONFIG_DEVTMPFS=y CONFIG_DMA_CMA=y CONFIG_CMA_SIZE_MBYTES=0 @@ -380,9 +380,9 @@ CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=32768 -CONFIG_BLK_DEV_RAM_DAX=y CONFIG_VIRTIO_BLK=y CONFIG_BLK_DEV_RBD=m +CONFIG_BLK_DEV_NVME=m CONFIG_ENCLOSURE_SERVICES=m CONFIG_GENWQE=m CONFIG_RAID_ATTRS=m @@ -461,6 +461,7 @@ CONFIG_PPTP=m CONFIG_PPPOL2TP=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m +CONFIG_INPUT_EVDEV=y # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -474,6 +475,9 @@ CONFIG_WATCHDOG=y CONFIG_WATCHDOG_NOWAYOUT=y CONFIG_SOFT_WATCHDOG=m CONFIG_DIAG288_WATCHDOG=m +CONFIG_DRM=y +CONFIG_DRM_VIRTIO_GPU=y +CONFIG_FRAMEBUFFER_CONSOLE=y # CONFIG_HID is not set # CONFIG_USB_SUPPORT is not set CONFIG_INFINIBAND=m @@ -482,7 +486,9 @@ CONFIG_MLX4_INFINIBAND=m CONFIG_MLX5_INFINIBAND=m CONFIG_VFIO=m CONFIG_VFIO_PCI=m +CONFIG_VIRTIO_PCI=m CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_INPUT=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y @@ -641,6 +647,8 @@ CONFIG_ATOMIC64_SELFTEST=y CONFIG_TEST_BPF=m CONFIG_BUG_ON_DATA_CORRUPTION=y CONFIG_S390_PTDUMP=y +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_BIG_KEYS=y CONFIG_ENCRYPTED_KEYS=m CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y @@ -649,17 +657,20 @@ CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y CONFIG_IMA=y +CONFIG_IMA_DEFAULT_HASH_SHA256=y +CONFIG_IMA_WRITE_POLICY=y CONFIG_IMA_APPRAISE=y -CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set CONFIG_CRYPTO_PCRYPT=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m -CONFIG_CRYPTO_GCM=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ 
-707,9 +718,8 @@ CONFIG_CRYPTO_DES_S390=m CONFIG_CRYPTO_AES_S390=m CONFIG_CRYPTO_GHASH_S390=m CONFIG_CRYPTO_CRC32_S390=y -CONFIG_ASYMMETRIC_KEY_TYPE=y -CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m -CONFIG_X509_CERTIFICATE_PARSER=m +CONFIG_PKCS7_MESSAGE_PARSER=y +CONFIG_SYSTEM_TRUSTED_KEYRING=y CONFIG_CRC7=m CONFIG_CRC8=m CONFIG_RANDOM32_SELFTEST=y diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig deleted file mode 100644 index d52eafe57ae8..000000000000 --- a/arch/s390/configs/gcov_defconfig +++ /dev/null @@ -1,661 +0,0 @@ -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_AUDIT=y -CONFIG_NO_HZ_IDLE=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_TASKSTATS=y -CONFIG_TASK_DELAY_ACCT=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_NUMA_BALANCING=y -# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set -CONFIG_MEMCG=y -CONFIG_MEMCG_SWAP=y -CONFIG_BLK_CGROUP=y -CONFIG_CFS_BANDWIDTH=y -CONFIG_RT_GROUP_SCHED=y -CONFIG_CGROUP_PIDS=y -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_HUGETLB=y -CONFIG_CPUSETS=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_CGROUP_PERF=y -CONFIG_CHECKPOINT_RESTORE=y -CONFIG_NAMESPACES=y -CONFIG_USER_NS=y -CONFIG_SCHED_AUTOGROUP=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_EXPERT=y -# CONFIG_SYSFS_SYSCALL is not set -CONFIG_BPF_SYSCALL=y -CONFIG_USERFAULTFD=y -# CONFIG_COMPAT_BRK is not set -CONFIG_PROFILING=y -CONFIG_OPROFILE=m -CONFIG_KPROBES=y -CONFIG_JUMP_LABEL=y -CONFIG_GCOV_KERNEL=y -CONFIG_GCOV_PROFILE_ALL=y -CONFIG_MODULES=y -CONFIG_MODULE_FORCE_LOAD=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_BLK_DEV_INTEGRITY=y -CONFIG_BLK_DEV_THROTTLING=y -CONFIG_BLK_WBT=y -CONFIG_BLK_WBT_SQ=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_IBM_PARTITION=y -CONFIG_BSD_DISKLABEL=y -CONFIG_MINIX_SUBPARTITION=y -CONFIG_SOLARIS_X86_PARTITION=y -CONFIG_UNIXWARE_DISKLABEL=y -CONFIG_CFQ_GROUP_IOSCHED=y -CONFIG_DEFAULT_DEADLINE=y -CONFIG_LIVEPATCH=y -CONFIG_TUNE_ZEC12=y -CONFIG_NR_CPUS=512 -CONFIG_NUMA=y -CONFIG_HZ_100=y -CONFIG_MEMORY_HOTPLUG=y -CONFIG_MEMORY_HOTREMOVE=y -CONFIG_KSM=y -CONFIG_TRANSPARENT_HUGEPAGE=y -CONFIG_CLEANCACHE=y -CONFIG_FRONTSWAP=y -CONFIG_MEM_SOFT_DIRTY=y -CONFIG_ZSWAP=y -CONFIG_ZBUD=m -CONFIG_ZSMALLOC=m -CONFIG_ZSMALLOC_STAT=y -CONFIG_DEFERRED_STRUCT_PAGE_INIT=y -CONFIG_IDLE_PAGE_TRACKING=y -CONFIG_PCI=y -CONFIG_HOTPLUG_PCI=y -CONFIG_HOTPLUG_PCI_S390=y -CONFIG_CHSC_SCH=y -CONFIG_CRASH_DUMP=y -CONFIG_BINFMT_MISC=m -CONFIG_HIBERNATION=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_PACKET_DIAG=m -CONFIG_UNIX=y -CONFIG_UNIX_DIAG=m -CONFIG_XFRM_USER=m -CONFIG_NET_KEY=m -CONFIG_SMC=m -CONFIG_SMC_DIAG=m -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE_DEMUX=m -CONFIG_NET_IPGRE=m -CONFIG_NET_IPGRE_BROADCAST=y -CONFIG_IP_MROUTE=y -CONFIG_IP_MROUTE_MULTIPLE_TABLES=y -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y -CONFIG_SYN_COOKIES=y -CONFIG_NET_IPVTI=m -CONFIG_INET_AH=m -CONFIG_INET_ESP=m -CONFIG_INET_IPCOMP=m -CONFIG_INET_XFRM_MODE_TRANSPORT=m -CONFIG_INET_XFRM_MODE_TUNNEL=m -CONFIG_INET_XFRM_MODE_BEET=m -CONFIG_INET_DIAG=m -CONFIG_INET_UDP_DIAG=m -CONFIG_TCP_CONG_ADVANCED=y -CONFIG_TCP_CONG_HSTCP=m -CONFIG_TCP_CONG_HYBLA=m -CONFIG_TCP_CONG_SCALABLE=m -CONFIG_TCP_CONG_LP=m -CONFIG_TCP_CONG_VENO=m -CONFIG_TCP_CONG_YEAH=m -CONFIG_TCP_CONG_ILLINOIS=m 
-CONFIG_IPV6_ROUTER_PREF=y -CONFIG_INET6_AH=m -CONFIG_INET6_ESP=m -CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_MIP6=m -CONFIG_INET6_XFRM_MODE_TRANSPORT=m -CONFIG_INET6_XFRM_MODE_TUNNEL=m -CONFIG_INET6_XFRM_MODE_BEET=m -CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m -CONFIG_IPV6_VTI=m -CONFIG_IPV6_SIT=m -CONFIG_IPV6_GRE=m -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_IPV6_SUBTREES=y -CONFIG_NETFILTER=y -CONFIG_NF_CONNTRACK=m -CONFIG_NF_CONNTRACK_SECMARK=y -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CONNTRACK_TIMEOUT=y -CONFIG_NF_CONNTRACK_TIMESTAMP=y -CONFIG_NF_CONNTRACK_AMANDA=m -CONFIG_NF_CONNTRACK_FTP=m -CONFIG_NF_CONNTRACK_H323=m -CONFIG_NF_CONNTRACK_IRC=m -CONFIG_NF_CONNTRACK_NETBIOS_NS=m -CONFIG_NF_CONNTRACK_SNMP=m -CONFIG_NF_CONNTRACK_PPTP=m -CONFIG_NF_CONNTRACK_SANE=m -CONFIG_NF_CONNTRACK_SIP=m -CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NF_CT_NETLINK=m -CONFIG_NF_CT_NETLINK_TIMEOUT=m -CONFIG_NF_TABLES=m -CONFIG_NFT_EXTHDR=m -CONFIG_NFT_META=m -CONFIG_NFT_CT=m -CONFIG_NFT_COUNTER=m -CONFIG_NFT_LOG=m -CONFIG_NFT_LIMIT=m -CONFIG_NFT_NAT=m -CONFIG_NFT_COMPAT=m -CONFIG_NFT_HASH=m -CONFIG_NETFILTER_XT_SET=m -CONFIG_NETFILTER_XT_TARGET_AUDIT=m -CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m -CONFIG_NETFILTER_XT_TARGET_CONNMARK=m -CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m -CONFIG_NETFILTER_XT_TARGET_CT=m -CONFIG_NETFILTER_XT_TARGET_DSCP=m -CONFIG_NETFILTER_XT_TARGET_HMARK=m -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m -CONFIG_NETFILTER_XT_TARGET_LOG=m -CONFIG_NETFILTER_XT_TARGET_MARK=m -CONFIG_NETFILTER_XT_TARGET_NFLOG=m -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_TEE=m -CONFIG_NETFILTER_XT_TARGET_TPROXY=m -CONFIG_NETFILTER_XT_TARGET_TRACE=m -CONFIG_NETFILTER_XT_TARGET_SECMARK=m -CONFIG_NETFILTER_XT_TARGET_TCPMSS=m -CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m -CONFIG_NETFILTER_XT_MATCH_BPF=m -CONFIG_NETFILTER_XT_MATCH_CLUSTER=m -CONFIG_NETFILTER_XT_MATCH_COMMENT=m -CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m -CONFIG_NETFILTER_XT_MATCH_CONNMARK=m -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_CPU=m -CONFIG_NETFILTER_XT_MATCH_DCCP=m -CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m -CONFIG_NETFILTER_XT_MATCH_DSCP=m -CONFIG_NETFILTER_XT_MATCH_ESP=m -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m -CONFIG_NETFILTER_XT_MATCH_HELPER=m -CONFIG_NETFILTER_XT_MATCH_IPRANGE=m -CONFIG_NETFILTER_XT_MATCH_IPVS=m -CONFIG_NETFILTER_XT_MATCH_LENGTH=m -CONFIG_NETFILTER_XT_MATCH_LIMIT=m -CONFIG_NETFILTER_XT_MATCH_MAC=m -CONFIG_NETFILTER_XT_MATCH_MARK=m -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_NFACCT=m -CONFIG_NETFILTER_XT_MATCH_OSF=m -CONFIG_NETFILTER_XT_MATCH_OWNER=m -CONFIG_NETFILTER_XT_MATCH_POLICY=m -CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m -CONFIG_NETFILTER_XT_MATCH_QUOTA=m -CONFIG_NETFILTER_XT_MATCH_RATEEST=m -CONFIG_NETFILTER_XT_MATCH_REALM=m -CONFIG_NETFILTER_XT_MATCH_RECENT=m -CONFIG_NETFILTER_XT_MATCH_STATE=m -CONFIG_NETFILTER_XT_MATCH_STATISTIC=m -CONFIG_NETFILTER_XT_MATCH_STRING=m -CONFIG_NETFILTER_XT_MATCH_TCPMSS=m -CONFIG_NETFILTER_XT_MATCH_TIME=m -CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_IP_SET=m -CONFIG_IP_SET_BITMAP_IP=m -CONFIG_IP_SET_BITMAP_IPMAC=m -CONFIG_IP_SET_BITMAP_PORT=m -CONFIG_IP_SET_HASH_IP=m -CONFIG_IP_SET_HASH_IPPORT=m -CONFIG_IP_SET_HASH_IPPORTIP=m -CONFIG_IP_SET_HASH_IPPORTNET=m -CONFIG_IP_SET_HASH_NETPORTNET=m -CONFIG_IP_SET_HASH_NET=m -CONFIG_IP_SET_HASH_NETNET=m -CONFIG_IP_SET_HASH_NETPORT=m 
-CONFIG_IP_SET_HASH_NETIFACE=m -CONFIG_IP_SET_LIST_SET=m -CONFIG_IP_VS=m -CONFIG_IP_VS_PROTO_TCP=y -CONFIG_IP_VS_PROTO_UDP=y -CONFIG_IP_VS_PROTO_ESP=y -CONFIG_IP_VS_PROTO_AH=y -CONFIG_IP_VS_RR=m -CONFIG_IP_VS_WRR=m -CONFIG_IP_VS_LC=m -CONFIG_IP_VS_WLC=m -CONFIG_IP_VS_LBLC=m -CONFIG_IP_VS_LBLCR=m -CONFIG_IP_VS_DH=m -CONFIG_IP_VS_SH=m -CONFIG_IP_VS_SED=m -CONFIG_IP_VS_NQ=m -CONFIG_IP_VS_FTP=m -CONFIG_IP_VS_PE_SIP=m -CONFIG_NF_CONNTRACK_IPV4=m -CONFIG_NF_TABLES_IPV4=m -CONFIG_NFT_CHAIN_ROUTE_IPV4=m -CONFIG_NF_TABLES_ARP=m -CONFIG_NFT_CHAIN_NAT_IPV4=m -CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_AH=m -CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_RPFILTER=m -CONFIG_IP_NF_MATCH_TTL=m -CONFIG_IP_NF_FILTER=m -CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_NAT=m -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_MANGLE=m -CONFIG_IP_NF_TARGET_CLUSTERIP=m -CONFIG_IP_NF_TARGET_ECN=m -CONFIG_IP_NF_TARGET_TTL=m -CONFIG_IP_NF_RAW=m -CONFIG_IP_NF_SECURITY=m -CONFIG_IP_NF_ARPTABLES=m -CONFIG_IP_NF_ARPFILTER=m -CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_CONNTRACK_IPV6=m -CONFIG_NF_TABLES_IPV6=m -CONFIG_NFT_CHAIN_ROUTE_IPV6=m -CONFIG_NFT_CHAIN_NAT_IPV6=m -CONFIG_IP6_NF_IPTABLES=m -CONFIG_IP6_NF_MATCH_AH=m -CONFIG_IP6_NF_MATCH_EUI64=m -CONFIG_IP6_NF_MATCH_FRAG=m -CONFIG_IP6_NF_MATCH_OPTS=m -CONFIG_IP6_NF_MATCH_HL=m -CONFIG_IP6_NF_MATCH_IPV6HEADER=m -CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RPFILTER=m -CONFIG_IP6_NF_MATCH_RT=m -CONFIG_IP6_NF_TARGET_HL=m -CONFIG_IP6_NF_FILTER=m -CONFIG_IP6_NF_TARGET_REJECT=m -CONFIG_IP6_NF_MANGLE=m -CONFIG_IP6_NF_RAW=m -CONFIG_IP6_NF_SECURITY=m -CONFIG_IP6_NF_NAT=m -CONFIG_IP6_NF_TARGET_MASQUERADE=m -CONFIG_NF_TABLES_BRIDGE=m -CONFIG_NET_SCTPPROBE=m -CONFIG_RDS=m -CONFIG_RDS_RDMA=m -CONFIG_RDS_TCP=m -CONFIG_L2TP=m -CONFIG_L2TP_DEBUGFS=m -CONFIG_L2TP_V3=y -CONFIG_L2TP_IP=m -CONFIG_L2TP_ETH=m -CONFIG_BRIDGE=m -CONFIG_VLAN_8021Q=m -CONFIG_VLAN_8021Q_GVRP=y -CONFIG_NET_SCHED=y -CONFIG_NET_SCH_CBQ=m -CONFIG_NET_SCH_HTB=m -CONFIG_NET_SCH_HFSC=m -CONFIG_NET_SCH_PRIO=m -CONFIG_NET_SCH_MULTIQ=m -CONFIG_NET_SCH_RED=m -CONFIG_NET_SCH_SFB=m -CONFIG_NET_SCH_SFQ=m -CONFIG_NET_SCH_TEQL=m -CONFIG_NET_SCH_TBF=m -CONFIG_NET_SCH_GRED=m -CONFIG_NET_SCH_DSMARK=m -CONFIG_NET_SCH_NETEM=m -CONFIG_NET_SCH_DRR=m -CONFIG_NET_SCH_MQPRIO=m -CONFIG_NET_SCH_CHOKE=m -CONFIG_NET_SCH_QFQ=m -CONFIG_NET_SCH_CODEL=m -CONFIG_NET_SCH_FQ_CODEL=m -CONFIG_NET_SCH_INGRESS=m -CONFIG_NET_SCH_PLUG=m -CONFIG_NET_CLS_BASIC=m -CONFIG_NET_CLS_TCINDEX=m -CONFIG_NET_CLS_ROUTE4=m -CONFIG_NET_CLS_FW=m -CONFIG_NET_CLS_U32=m -CONFIG_CLS_U32_PERF=y -CONFIG_CLS_U32_MARK=y -CONFIG_NET_CLS_RSVP=m -CONFIG_NET_CLS_RSVP6=m -CONFIG_NET_CLS_FLOW=m -CONFIG_NET_CLS_CGROUP=y -CONFIG_NET_CLS_BPF=m -CONFIG_NET_CLS_ACT=y -CONFIG_NET_ACT_POLICE=m -CONFIG_NET_ACT_GACT=m -CONFIG_GACT_PROB=y -CONFIG_NET_ACT_MIRRED=m -CONFIG_NET_ACT_IPT=m -CONFIG_NET_ACT_NAT=m -CONFIG_NET_ACT_PEDIT=m -CONFIG_NET_ACT_SIMP=m -CONFIG_NET_ACT_SKBEDIT=m -CONFIG_NET_ACT_CSUM=m -CONFIG_DNS_RESOLVER=y -CONFIG_NETLINK_DIAG=m -CONFIG_CGROUP_NET_PRIO=y -CONFIG_BPF_JIT=y -CONFIG_NET_PKTGEN=m -CONFIG_NET_TCPPROBE=m -CONFIG_DEVTMPFS=y -CONFIG_DMA_CMA=y -CONFIG_CMA_SIZE_MBYTES=0 -CONFIG_CONNECTOR=y -CONFIG_ZRAM=m -CONFIG_BLK_DEV_LOOP=m -CONFIG_BLK_DEV_CRYPTOLOOP=m -CONFIG_BLK_DEV_DRBD=m -CONFIG_BLK_DEV_NBD=m -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=32768 -CONFIG_BLK_DEV_RAM_DAX=y -CONFIG_VIRTIO_BLK=y -CONFIG_ENCLOSURE_SERVICES=m -CONFIG_GENWQE=m -CONFIG_RAID_ATTRS=m -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_ST=m -CONFIG_CHR_DEV_OSST=m -CONFIG_BLK_DEV_SR=m -CONFIG_CHR_DEV_SG=y 
-CONFIG_CHR_DEV_SCH=m -CONFIG_SCSI_ENCLOSURE=m -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_SPI_ATTRS=m -CONFIG_SCSI_FC_ATTRS=y -CONFIG_SCSI_SAS_LIBSAS=m -CONFIG_SCSI_SRP_ATTRS=m -CONFIG_ISCSI_TCP=m -CONFIG_SCSI_DEBUG=m -CONFIG_ZFCP=y -CONFIG_SCSI_VIRTIO=m -CONFIG_SCSI_DH=y -CONFIG_SCSI_DH_RDAC=m -CONFIG_SCSI_DH_HP_SW=m -CONFIG_SCSI_DH_EMC=m -CONFIG_SCSI_DH_ALUA=m -CONFIG_SCSI_OSD_INITIATOR=m -CONFIG_SCSI_OSD_ULD=m -CONFIG_MD=y -CONFIG_BLK_DEV_MD=y -CONFIG_MD_LINEAR=m -CONFIG_MD_MULTIPATH=m -CONFIG_MD_FAULTY=m -CONFIG_BLK_DEV_DM=m -CONFIG_DM_CRYPT=m -CONFIG_DM_SNAPSHOT=m -CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_MIRROR=m -CONFIG_DM_LOG_USERSPACE=m -CONFIG_DM_RAID=m -CONFIG_DM_ZERO=m -CONFIG_DM_MULTIPATH=m -CONFIG_DM_MULTIPATH_QL=m -CONFIG_DM_MULTIPATH_ST=m -CONFIG_DM_DELAY=m -CONFIG_DM_UEVENT=y -CONFIG_DM_FLAKEY=m -CONFIG_DM_VERITY=m -CONFIG_DM_SWITCH=m -CONFIG_NETDEVICES=y -CONFIG_BONDING=m -CONFIG_DUMMY=m -CONFIG_EQUALIZER=m -CONFIG_IFB=m -CONFIG_MACVLAN=m -CONFIG_MACVTAP=m -CONFIG_VXLAN=m -CONFIG_TUN=m -CONFIG_VETH=m -CONFIG_VIRTIO_NET=m -CONFIG_NLMON=m -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_CHELSIO is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -CONFIG_MLX4_EN=m -CONFIG_MLX5_CORE=m -CONFIG_MLX5_CORE_EN=y -# CONFIG_NET_VENDOR_NATSEMI is not set -CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m -CONFIG_PPP_MPPE=m -CONFIG_PPPOE=m -CONFIG_PPTP=m -CONFIG_PPPOL2TP=m -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m -# CONFIG_INPUT_KEYBOARD is not set -# CONFIG_INPUT_MOUSE is not set -# CONFIG_SERIO is not set -CONFIG_LEGACY_PTY_COUNT=0 -CONFIG_HW_RANDOM_VIRTIO=m -CONFIG_RAW_DRIVER=m -CONFIG_HANGCHECK_TIMER=m -CONFIG_TN3270_FS=y -# CONFIG_HWMON is not set -CONFIG_WATCHDOG=y -CONFIG_WATCHDOG_NOWAYOUT=y -CONFIG_SOFT_WATCHDOG=m -CONFIG_DIAG288_WATCHDOG=m -# CONFIG_HID is not set -# CONFIG_USB_SUPPORT is not set -CONFIG_INFINIBAND=m -CONFIG_INFINIBAND_USER_ACCESS=m -CONFIG_MLX4_INFINIBAND=m -CONFIG_MLX5_INFINIBAND=m -CONFIG_VFIO=m -CONFIG_VFIO_PCI=m -CONFIG_VIRTIO_BALLOON=m -CONFIG_EXT4_FS=y -CONFIG_EXT4_FS_POSIX_ACL=y -CONFIG_EXT4_FS_SECURITY=y -CONFIG_EXT4_ENCRYPTION=y -CONFIG_JBD2_DEBUG=y -CONFIG_JFS_FS=m -CONFIG_JFS_POSIX_ACL=y -CONFIG_JFS_SECURITY=y -CONFIG_JFS_STATISTICS=y -CONFIG_XFS_FS=y -CONFIG_XFS_QUOTA=y -CONFIG_XFS_POSIX_ACL=y -CONFIG_XFS_RT=y -CONFIG_GFS2_FS=m -CONFIG_GFS2_FS_LOCKING_DLM=y -CONFIG_OCFS2_FS=m -CONFIG_BTRFS_FS=y -CONFIG_BTRFS_FS_POSIX_ACL=y -CONFIG_NILFS2_FS=m -CONFIG_FS_DAX=y -CONFIG_EXPORTFS_BLOCK_OPS=y -CONFIG_FANOTIFY=y -CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y -CONFIG_QUOTA_NETLINK_INTERFACE=y -CONFIG_QFMT_V1=m -CONFIG_QFMT_V2=m -CONFIG_AUTOFS4_FS=m -CONFIG_FUSE_FS=y -CONFIG_CUSE=m -CONFIG_OVERLAY_FS=m -CONFIG_OVERLAY_FS_REDIRECT_DIR=y -CONFIG_FSCACHE=m -CONFIG_CACHEFILES=m -CONFIG_ISO9660_FS=y -CONFIG_JOLIET=y -CONFIG_ZISOFS=y -CONFIG_UDF_FS=m -CONFIG_MSDOS_FS=m -CONFIG_VFAT_FS=m -CONFIG_NTFS_FS=m -CONFIG_NTFS_RW=y -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_HUGETLBFS=y -CONFIG_CONFIGFS_FS=m -CONFIG_ECRYPT_FS=m -CONFIG_CRAMFS=m -CONFIG_SQUASHFS=m -CONFIG_SQUASHFS_XATTR=y -CONFIG_SQUASHFS_LZO=y -CONFIG_SQUASHFS_XZ=y -CONFIG_ROMFS_FS=m -CONFIG_NFS_FS=m -CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=m -CONFIG_NFS_SWAP=y -CONFIG_NFSD=m -CONFIG_NFSD_V3_ACL=y -CONFIG_NFSD_V4=y -CONFIG_NFSD_V4_SECURITY_LABEL=y -CONFIG_CIFS=m -CONFIG_CIFS_STATS=y -CONFIG_CIFS_STATS2=y -CONFIG_CIFS_WEAK_PW_HASH=y -CONFIG_CIFS_UPCALL=y -CONFIG_CIFS_XATTR=y -CONFIG_CIFS_POSIX=y -# CONFIG_CIFS_DEBUG is 
not set -CONFIG_CIFS_DFS_UPCALL=y -CONFIG_NLS_DEFAULT="utf8" -CONFIG_NLS_CODEPAGE_437=m -CONFIG_NLS_CODEPAGE_850=m -CONFIG_NLS_ASCII=m -CONFIG_NLS_ISO8859_1=m -CONFIG_NLS_ISO8859_15=m -CONFIG_NLS_UTF8=m -CONFIG_DLM=m -CONFIG_PRINTK_TIME=y -CONFIG_DEBUG_INFO=y -CONFIG_DEBUG_INFO_DWARF4=y -CONFIG_GDB_SCRIPTS=y -# CONFIG_ENABLE_MUST_CHECK is not set -CONFIG_FRAME_WARN=1024 -CONFIG_UNUSED_SYMBOLS=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_MEMORY_INIT=y -CONFIG_PANIC_ON_OOPS=y -CONFIG_RCU_TORTURE_TEST=m -CONFIG_RCU_CPU_STALL_TIMEOUT=60 -CONFIG_LATENCYTOP=y -CONFIG_SCHED_TRACER=y -CONFIG_FTRACE_SYSCALLS=y -CONFIG_STACK_TRACER=y -CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_FUNCTION_PROFILER=y -CONFIG_HIST_TRIGGERS=y -CONFIG_LKDTM=m -CONFIG_PERCPU_TEST=m -CONFIG_ATOMIC64_SELFTEST=y -CONFIG_TEST_BPF=m -CONFIG_BUG_ON_DATA_CORRUPTION=y -CONFIG_S390_PTDUMP=y -CONFIG_PERSISTENT_KEYRINGS=y -CONFIG_BIG_KEYS=y -CONFIG_ENCRYPTED_KEYS=m -CONFIG_SECURITY=y -CONFIG_SECURITY_NETWORK=y -CONFIG_SECURITY_SELINUX=y -CONFIG_SECURITY_SELINUX_BOOTPARAM=y -CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 -CONFIG_SECURITY_SELINUX_DISABLE=y -CONFIG_INTEGRITY_SIGNATURE=y -CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y -CONFIG_IMA=y -CONFIG_IMA_WRITE_POLICY=y -CONFIG_IMA_APPRAISE=y -CONFIG_CRYPTO_DH=m -CONFIG_CRYPTO_ECDH=m -CONFIG_CRYPTO_USER=m -# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set -CONFIG_CRYPTO_PCRYPT=m -CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m -CONFIG_CRYPTO_TEST=m -CONFIG_CRYPTO_CHACHA20POLY1305=m -CONFIG_CRYPTO_LRW=m -CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_KEYWRAP=m -CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_VMAC=m -CONFIG_CRYPTO_CRC32=m -CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_RMD128=m -CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_RMD256=m -CONFIG_CRYPTO_RMD320=m -CONFIG_CRYPTO_SHA512=m -CONFIG_CRYPTO_SHA3=m -CONFIG_CRYPTO_TGR192=m -CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_AES_TI=m -CONFIG_CRYPTO_ANUBIS=m -CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_CAMELLIA=m -CONFIG_CRYPTO_CAST5=m -CONFIG_CRYPTO_CAST6=m -CONFIG_CRYPTO_FCRYPT=m -CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m -CONFIG_CRYPTO_SEED=m -CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_TEA=m -CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_842=m -CONFIG_CRYPTO_LZ4=m -CONFIG_CRYPTO_LZ4HC=m -CONFIG_CRYPTO_ANSI_CPRNG=m -CONFIG_CRYPTO_USER_API_HASH=m -CONFIG_CRYPTO_USER_API_SKCIPHER=m -CONFIG_CRYPTO_USER_API_RNG=m -CONFIG_CRYPTO_USER_API_AEAD=m -CONFIG_ZCRYPT=m -CONFIG_PKEY=m -CONFIG_CRYPTO_PAES_S390=m -CONFIG_CRYPTO_SHA1_S390=m -CONFIG_CRYPTO_SHA256_S390=m -CONFIG_CRYPTO_SHA512_S390=m -CONFIG_CRYPTO_DES_S390=m -CONFIG_CRYPTO_AES_S390=m -CONFIG_CRYPTO_GHASH_S390=m -CONFIG_CRYPTO_CRC32_S390=y -CONFIG_CRC7=m -CONFIG_CRC8=m -CONFIG_CORDIC=m -CONFIG_CMM=m -CONFIG_APPLDATA_BASE=y -CONFIG_KVM=m -CONFIG_KVM_S390_UCONTROL=y -CONFIG_VHOST_NET=m diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index 20ed149e1137..c105bcc6d7a6 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig @@ -25,13 +25,13 @@ CONFIG_CPUSETS=y CONFIG_CGROUP_DEVICE=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y -CONFIG_CHECKPOINT_RESTORE=y CONFIG_NAMESPACES=y CONFIG_USER_NS=y CONFIG_SCHED_AUTOGROUP=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y # CONFIG_SYSFS_SYSCALL is not set +CONFIG_CHECKPOINT_RESTORE=y CONFIG_BPF_SYSCALL=y CONFIG_USERFAULTFD=y # CONFIG_COMPAT_BRK is not set @@ -45,6 +45,8 @@ CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +CONFIG_MODULE_SIG_SHA256=y 
CONFIG_BLK_DEV_INTEGRITY=y CONFIG_BLK_DEV_THROTTLING=y CONFIG_BLK_WBT=y @@ -62,6 +64,7 @@ CONFIG_TUNE_ZEC12=y CONFIG_NR_CPUS=512 CONFIG_NUMA=y CONFIG_HZ_100=y +CONFIG_KEXEC_FILE=y CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTREMOVE=y CONFIG_KSM=y @@ -301,7 +304,6 @@ CONFIG_IP6_NF_SECURITY=m CONFIG_IP6_NF_NAT=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_NF_TABLES_BRIDGE=m -CONFIG_NET_SCTPPROBE=m CONFIG_RDS=m CONFIG_RDS_RDMA=m CONFIG_RDS_TCP=m @@ -359,11 +361,11 @@ CONFIG_NET_ACT_SIMP=m CONFIG_NET_ACT_SKBEDIT=m CONFIG_NET_ACT_CSUM=m CONFIG_DNS_RESOLVER=y +CONFIG_OPENVSWITCH=m CONFIG_NETLINK_DIAG=m CONFIG_CGROUP_NET_PRIO=y CONFIG_BPF_JIT=y CONFIG_NET_PKTGEN=m -CONFIG_NET_TCPPROBE=m CONFIG_DEVTMPFS=y CONFIG_DMA_CMA=y CONFIG_CMA_SIZE_MBYTES=0 @@ -375,8 +377,9 @@ CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=32768 -CONFIG_BLK_DEV_RAM_DAX=y CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_RBD=m +CONFIG_BLK_DEV_NVME=m CONFIG_ENCLOSURE_SERVICES=m CONFIG_GENWQE=m CONFIG_RAID_ATTRS=m @@ -455,6 +458,7 @@ CONFIG_PPTP=m CONFIG_PPPOL2TP=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m +CONFIG_INPUT_EVDEV=y # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -468,6 +472,9 @@ CONFIG_WATCHDOG=y CONFIG_WATCHDOG_NOWAYOUT=y CONFIG_SOFT_WATCHDOG=m CONFIG_DIAG288_WATCHDOG=m +CONFIG_DRM=y +CONFIG_DRM_VIRTIO_GPU=y +CONFIG_FRAMEBUFFER_CONSOLE=y # CONFIG_HID is not set # CONFIG_USB_SUPPORT is not set CONFIG_INFINIBAND=m @@ -476,7 +483,9 @@ CONFIG_MLX4_INFINIBAND=m CONFIG_MLX5_INFINIBAND=m CONFIG_VFIO=m CONFIG_VFIO_PCI=m +CONFIG_VIRTIO_PCI=m CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_INPUT=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y @@ -507,7 +516,6 @@ CONFIG_AUTOFS4_FS=m CONFIG_FUSE_FS=y CONFIG_CUSE=m CONFIG_OVERLAY_FS=m -CONFIG_OVERLAY_FS_REDIRECT_DIR=y CONFIG_FSCACHE=m CONFIG_CACHEFILES=m CONFIG_ISO9660_FS=y @@ -592,8 +600,10 @@ CONFIG_SECURITY_SELINUX_DISABLE=y CONFIG_INTEGRITY_SIGNATURE=y CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y CONFIG_IMA=y +CONFIG_IMA_DEFAULT_HASH_SHA256=y CONFIG_IMA_WRITE_POLICY=y CONFIG_IMA_APPRAISE=y +CONFIG_CRYPTO_FIPS=y CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_USER=m diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 46a3178d8bc6..f40600eb1762 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -8,6 +8,7 @@ CONFIG_TASKSTATS=y CONFIG_TASK_DELAY_ACCT=y CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y +# CONFIG_CPU_ISOLATION is not set CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_CGROUPS=y @@ -23,12 +24,12 @@ CONFIG_CPUSETS=y CONFIG_CGROUP_DEVICE=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y -CONFIG_CHECKPOINT_RESTORE=y CONFIG_NAMESPACES=y CONFIG_USER_NS=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y # CONFIG_SYSFS_SYSCALL is not set +CONFIG_CHECKPOINT_RESTORE=y CONFIG_BPF_SYSCALL=y CONFIG_USERFAULTFD=y # CONFIG_COMPAT_BRK is not set @@ -47,6 +48,7 @@ CONFIG_LIVEPATCH=y CONFIG_NR_CPUS=256 CONFIG_NUMA=y CONFIG_HZ_100=y +CONFIG_KEXEC_FILE=y CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTREMOVE=y CONFIG_KSM=y @@ -129,10 +131,13 @@ CONFIG_EQUALIZER=m CONFIG_TUN=m CONFIG_VIRTIO_NET=y # CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_CORTINA is not set # CONFIG_NET_VENDOR_SOLARFLARE is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set # CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_INPUT is not set # CONFIG_SERIO is not set +# CONFIG_VT is not set CONFIG_DEVKMEM=y CONFIG_RAW_DRIVER=m CONFIG_VIRTIO_BALLOON=y @@ -177,13 +182,15 @@ CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y 
CONFIG_STACK_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y CONFIG_FUNCTION_PROFILER=y -CONFIG_KPROBES_SANITY_TEST=y +# CONFIG_RUNTIME_TESTING_MENU is not set CONFIG_S390_PTDUMP=y CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_AUTHENC=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CFB=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -213,6 +220,8 @@ CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_DEFLATE=m diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index 43bbe63e2992..06b513d192b9 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c @@ -320,7 +320,7 @@ static void hypfs_kill_super(struct super_block *sb) if (sb->s_root) hypfs_delete_tree(sb->s_root); - if (sb_info->update_file) + if (sb_info && sb_info->update_file) hypfs_remove(sb_info->update_file); kfree(sb->s_fs_info); sb->s_fs_info = NULL; diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h index 1d708a419326..825dd0f7f221 100644 --- a/arch/s390/include/asm/kexec.h +++ b/arch/s390/include/asm/kexec.h @@ -46,4 +46,27 @@ static inline void crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs) { } +struct kimage; +struct s390_load_data { + /* Pointer to the kernel buffer. Used to register cmdline etc.. */ + void *kernel_buf; + + /* Total size of loaded segments in memory. Used as an offset. */ + size_t memsz; + + /* Load address of initrd. Used to register INITRD_START in kernel. */ + unsigned long initrd_load_addr; +}; + +int kexec_file_add_purgatory(struct kimage *image, + struct s390_load_data *data); +int kexec_file_add_initrd(struct kimage *image, + struct s390_load_data *data, + char *initrd, unsigned long initrd_len); +int *kexec_file_update_kernel(struct kimage *iamge, + struct s390_load_data *data); + +extern const struct kexec_file_ops s390_kexec_image_ops; +extern const struct kexec_file_ops s390_kexec_elf_ops; + #endif /*_S390_KEXEC_H */ diff --git a/arch/s390/include/asm/purgatory.h b/arch/s390/include/asm/purgatory.h new file mode 100644 index 000000000000..e297bcfc476f --- /dev/null +++ b/arch/s390/include/asm/purgatory.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 2018 + * + * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> + */ + +#ifndef _S390_PURGATORY_H_ +#define _S390_PURGATORY_H_ +#ifndef __ASSEMBLY__ + +#include <linux/purgatory.h> + +int verify_sha256_digest(void); + +#endif /* __ASSEMBLY__ */ +#endif /* _S390_PURGATORY_H_ */ diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index 124154fdfc97..9c30ebe046f3 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* * S390 version - * Copyright IBM Corp. 1999, 2010 + * Copyright IBM Corp. 
1999, 2017 */ #ifndef _ASM_S390_SETUP_H #define _ASM_S390_SETUP_H @@ -37,17 +37,31 @@ #define LPP_MAGIC _BITUL(31) #define LPP_PID_MASK _AC(0xffffffff, UL) +/* Offsets to entry points in kernel/head.S */ + +#define STARTUP_NORMAL_OFFSET 0x10000 +#define STARTUP_KDUMP_OFFSET 0x10010 + +/* Offsets to parameters in kernel/head.S */ + +#define IPL_DEVICE_OFFSET 0x10400 +#define INITRD_START_OFFSET 0x10408 +#define INITRD_SIZE_OFFSET 0x10410 +#define OLDMEM_BASE_OFFSET 0x10418 +#define OLDMEM_SIZE_OFFSET 0x10420 +#define COMMAND_LINE_OFFSET 0x10480 + #ifndef __ASSEMBLY__ #include <asm/lowcore.h> #include <asm/types.h> -#define IPL_DEVICE (*(unsigned long *) (0x10400)) -#define INITRD_START (*(unsigned long *) (0x10408)) -#define INITRD_SIZE (*(unsigned long *) (0x10410)) -#define OLDMEM_BASE (*(unsigned long *) (0x10418)) -#define OLDMEM_SIZE (*(unsigned long *) (0x10420)) -#define COMMAND_LINE ((char *) (0x10480)) +#define IPL_DEVICE (*(unsigned long *) (IPL_DEVICE_OFFSET)) +#define INITRD_START (*(unsigned long *) (INITRD_START_OFFSET)) +#define INITRD_SIZE (*(unsigned long *) (INITRD_SIZE_OFFSET)) +#define OLDMEM_BASE (*(unsigned long *) (OLDMEM_BASE_OFFSET)) +#define OLDMEM_SIZE (*(unsigned long *) (OLDMEM_SIZE_OFFSET)) +#define COMMAND_LINE ((char *) (COMMAND_LINE_OFFSET)) extern int memory_end_set; extern unsigned long memory_end; @@ -121,12 +135,12 @@ extern void (*_machine_power_off)(void); #else /* __ASSEMBLY__ */ -#define IPL_DEVICE 0x10400 -#define INITRD_START 0x10408 -#define INITRD_SIZE 0x10410 -#define OLDMEM_BASE 0x10418 -#define OLDMEM_SIZE 0x10420 -#define COMMAND_LINE 0x10480 +#define IPL_DEVICE (IPL_DEVICE_OFFSET) +#define INITRD_START (INITRD_START_OFFSET) +#define INITRD_SIZE (INITRD_SIZE_OFFSET) +#define OLDMEM_BASE (OLDMEM_BASE_OFFSET) +#define OLDMEM_SIZE (OLDMEM_SIZE_OFFSET) +#define COMMAND_LINE (COMMAND_LINE_OFFSET) #endif /* __ASSEMBLY__ */ #endif /* _ASM_S390_SETUP_H */ diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index 83ba57533ce6..3c883c368eb0 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -45,6 +45,9 @@ struct thread_info { void arch_release_task_struct(struct task_struct *tsk); int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); +void arch_setup_new_exec(void); +#define arch_setup_new_exec arch_setup_new_exec + #endif /* diff --git a/arch/s390/include/uapi/asm/signal.h b/arch/s390/include/uapi/asm/signal.h index c57f9d28d894..9a14a611ed82 100644 --- a/arch/s390/include/uapi/asm/signal.h +++ b/arch/s390/include/uapi/asm/signal.h @@ -97,22 +97,31 @@ typedef unsigned long sigset_t; #include <asm-generic/signal-defs.h> #ifndef __KERNEL__ -/* Here we must cater to libcs that poke about in kernel headers. */ +/* + * There are two system calls in regard to sigaction, sys_rt_sigaction + * and sys_sigaction. Internally the kernel uses the struct old_sigaction + * for the older sys_sigaction system call, and the kernel version of the + * struct sigaction for the newer sys_rt_sigaction. + * + * The uapi definition for struct sigaction has made a strange distinction + * between 31-bit and 64-bit in the past. For 64-bit the uapi structure + * looks like the kernel struct sigaction, but for 31-bit it used to + * look like the kernel struct old_sigaction. That practically made the + * structure unusable for either system call. To get around this problem + * the glibc always had its own definitions for the sigaction structures. 
+ * + * The current struct sigaction uapi definition below is suitable for the + * sys_rt_sigaction system call only. + */ struct sigaction { union { __sighandler_t _sa_handler; void (*_sa_sigaction)(int, struct siginfo *, void *); } _u; -#ifndef __s390x__ /* lovely */ - sigset_t sa_mask; - unsigned long sa_flags; - void (*sa_restorer)(void); -#else /* __s390x__ */ unsigned long sa_flags; void (*sa_restorer)(void); sigset_t sa_mask; -#endif /* __s390x__ */ }; #define sa_handler _u._sa_handler diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index b06a6f79c1ec..84ea6225efb4 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -82,6 +82,9 @@ obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_UPROBES) += uprobes.o +obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o +obj-$(CONFIG_KEXEC_FILE) += kexec_elf.o + obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index cfe2c45c5180..eb2a5c0443cd 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -10,6 +10,7 @@ #include <linux/kbuild.h> #include <linux/kvm_host.h> #include <linux/sched.h> +#include <linux/purgatory.h> #include <asm/idle.h> #include <asm/vdso.h> #include <asm/pgtable.h> @@ -204,5 +205,9 @@ int main(void) OFFSET(__GMAP_ASCE, gmap, asce); OFFSET(__SIE_PROG0C, kvm_s390_sie_block, prog0c); OFFSET(__SIE_PROG20, kvm_s390_sie_block, prog20); + /* kexec_sha_region */ + OFFSET(__KEXEC_SHA_REGION_START, kexec_sha_region, start); + OFFSET(__KEXEC_SHA_REGION_LEN, kexec_sha_region, len); + DEFINE(__KEXEC_SHA_REGION_SIZE, sizeof(struct kexec_sha_region)); return 0; } diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c index 11e9d8b5c1b0..607c5e9fba3d 100644 --- a/arch/s390/kernel/compat_wrapper.c +++ b/arch/s390/kernel/compat_wrapper.c @@ -182,3 +182,4 @@ COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, COMPAT_SYSCALL_WRAP2(s390_guarded_storage, int, command, struct gs_cb *, gs_cb); COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer); COMPAT_SYSCALL_WRAP4(s390_sthyi, unsigned long, code, void __user *, info, u64 __user *, rc, unsigned long, flags); +COMPAT_SYSCALL_WRAP5(kexec_file_load, int, kernel_fd, int, initrd_fd, unsigned long, cmdline_len, const char __user *, cmdline_ptr, unsigned long, flags) diff --git a/arch/s390/kernel/kexec_elf.c b/arch/s390/kernel/kexec_elf.c new file mode 100644 index 000000000000..5a286b012043 --- /dev/null +++ b/arch/s390/kernel/kexec_elf.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ELF loader for kexec_file_load system call. + * + * Copyright IBM Corp. 
2018 + * + * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> + */ + +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/kexec.h> +#include <asm/setup.h> + +static int kexec_file_add_elf_kernel(struct kimage *image, + struct s390_load_data *data, + char *kernel, unsigned long kernel_len) +{ + struct kexec_buf buf; + const Elf_Ehdr *ehdr; + const Elf_Phdr *phdr; + int i, ret; + + ehdr = (Elf_Ehdr *)kernel; + buf.image = image; + + phdr = (void *)ehdr + ehdr->e_phoff; + for (i = 0; i < ehdr->e_phnum; i++, phdr++) { + if (phdr->p_type != PT_LOAD) + continue; + + buf.buffer = kernel + phdr->p_offset; + buf.bufsz = phdr->p_filesz; + + buf.mem = ALIGN(phdr->p_paddr, phdr->p_align); + buf.memsz = phdr->p_memsz; + + if (phdr->p_paddr == 0) { + data->kernel_buf = buf.buffer; + data->memsz += STARTUP_NORMAL_OFFSET; + + buf.buffer += STARTUP_NORMAL_OFFSET; + buf.bufsz -= STARTUP_NORMAL_OFFSET; + + buf.mem += STARTUP_NORMAL_OFFSET; + buf.memsz -= STARTUP_NORMAL_OFFSET; + } + + if (image->type == KEXEC_TYPE_CRASH) + buf.mem += crashk_res.start; + + ret = kexec_add_buffer(&buf); + if (ret) + return ret; + + data->memsz += buf.memsz; + } + + return 0; +} + +static void *s390_elf_load(struct kimage *image, + char *kernel, unsigned long kernel_len, + char *initrd, unsigned long initrd_len, + char *cmdline, unsigned long cmdline_len) +{ + struct s390_load_data data = {0}; + const Elf_Ehdr *ehdr; + const Elf_Phdr *phdr; + size_t size; + int i, ret; + + /* image->fobs->probe already checked for valid ELF magic number. */ + ehdr = (Elf_Ehdr *)kernel; + + if (ehdr->e_type != ET_EXEC || + ehdr->e_ident[EI_CLASS] != ELFCLASS64 || + !elf_check_arch(ehdr)) + return ERR_PTR(-EINVAL); + + if (!ehdr->e_phnum || ehdr->e_phentsize != sizeof(Elf_Phdr)) + return ERR_PTR(-EINVAL); + + size = ehdr->e_ehsize + ehdr->e_phoff; + size += ehdr->e_phentsize * ehdr->e_phnum; + if (size > kernel_len) + return ERR_PTR(-EINVAL); + + phdr = (void *)ehdr + ehdr->e_phoff; + size = ALIGN(size, phdr->p_align); + for (i = 0; i < ehdr->e_phnum; i++, phdr++) { + if (phdr->p_type == PT_INTERP) + return ERR_PTR(-EINVAL); + + if (phdr->p_offset > kernel_len) + return ERR_PTR(-EINVAL); + + size += ALIGN(phdr->p_filesz, phdr->p_align); + } + + if (size > kernel_len) + return ERR_PTR(-EINVAL); + + ret = kexec_file_add_elf_kernel(image, &data, kernel, kernel_len); + if (ret) + return ERR_PTR(ret); + + if (!data.memsz) + return ERR_PTR(-EINVAL); + + if (initrd) { + ret = kexec_file_add_initrd(image, &data, initrd, initrd_len); + if (ret) + return ERR_PTR(ret); + } + + ret = kexec_file_add_purgatory(image, &data); + if (ret) + return ERR_PTR(ret); + + return kexec_file_update_kernel(image, &data); +} + +static int s390_elf_probe(const char *buf, unsigned long len) +{ + const Elf_Ehdr *ehdr; + + if (len < sizeof(Elf_Ehdr)) + return -ENOEXEC; + + ehdr = (Elf_Ehdr *)buf; + + /* Only check the ELF magic number here and do proper validity check + * in the loader. Any check here that fails would send the erroneous + * ELF file to the image loader that does not care what it gets. + * (Most likely) causing behavior not intended by the user. 
+ */ + if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) + return -ENOEXEC; + + return 0; +} + +const struct kexec_file_ops s390_kexec_elf_ops = { + .probe = s390_elf_probe, + .load = s390_elf_load, +}; diff --git a/arch/s390/kernel/kexec_image.c b/arch/s390/kernel/kexec_image.c new file mode 100644 index 000000000000..3800852595e8 --- /dev/null +++ b/arch/s390/kernel/kexec_image.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Image loader for kexec_file_load system call. + * + * Copyright IBM Corp. 2018 + * + * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> + */ + +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/kexec.h> +#include <asm/setup.h> + +static int kexec_file_add_image_kernel(struct kimage *image, + struct s390_load_data *data, + char *kernel, unsigned long kernel_len) +{ + struct kexec_buf buf; + int ret; + + buf.image = image; + + buf.buffer = kernel + STARTUP_NORMAL_OFFSET; + buf.bufsz = kernel_len - STARTUP_NORMAL_OFFSET; + + buf.mem = STARTUP_NORMAL_OFFSET; + if (image->type == KEXEC_TYPE_CRASH) + buf.mem += crashk_res.start; + buf.memsz = buf.bufsz; + + ret = kexec_add_buffer(&buf); + + data->kernel_buf = kernel; + data->memsz += buf.memsz + STARTUP_NORMAL_OFFSET; + + return ret; +} + +static void *s390_image_load(struct kimage *image, + char *kernel, unsigned long kernel_len, + char *initrd, unsigned long initrd_len, + char *cmdline, unsigned long cmdline_len) +{ + struct s390_load_data data = {0}; + int ret; + + ret = kexec_file_add_image_kernel(image, &data, kernel, kernel_len); + if (ret) + return ERR_PTR(ret); + + if (initrd) { + ret = kexec_file_add_initrd(image, &data, initrd, initrd_len); + if (ret) + return ERR_PTR(ret); + } + + ret = kexec_file_add_purgatory(image, &data); + if (ret) + return ERR_PTR(ret); + + return kexec_file_update_kernel(image, &data); +} + +static int s390_image_probe(const char *buf, unsigned long len) +{ + /* Can't reliably tell if an image is valid. Therefore give the + * user whatever he wants. + */ + return 0; +} + +const struct kexec_file_ops s390_kexec_image_ops = { + .probe = s390_image_probe, + .load = s390_image_load, +}; diff --git a/arch/s390/kernel/machine_kexec_file.c b/arch/s390/kernel/machine_kexec_file.c new file mode 100644 index 000000000000..f413f57f8d20 --- /dev/null +++ b/arch/s390/kernel/machine_kexec_file.c @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * s390 code for kexec_file_load system call + * + * Copyright IBM Corp. 
2018 + * + * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> + */ + +#include <linux/elf.h> +#include <linux/kexec.h> +#include <asm/setup.h> + +const struct kexec_file_ops * const kexec_file_loaders[] = { + &s390_kexec_elf_ops, + &s390_kexec_image_ops, + NULL, +}; + +int *kexec_file_update_kernel(struct kimage *image, + struct s390_load_data *data) +{ + unsigned long *loc; + + if (image->cmdline_buf_len >= ARCH_COMMAND_LINE_SIZE) + return ERR_PTR(-EINVAL); + + if (image->cmdline_buf_len) + memcpy(data->kernel_buf + COMMAND_LINE_OFFSET, + image->cmdline_buf, image->cmdline_buf_len); + + if (image->type == KEXEC_TYPE_CRASH) { + loc = (unsigned long *)(data->kernel_buf + OLDMEM_BASE_OFFSET); + *loc = crashk_res.start; + + loc = (unsigned long *)(data->kernel_buf + OLDMEM_SIZE_OFFSET); + *loc = crashk_res.end - crashk_res.start + 1; + } + + if (image->initrd_buf) { + loc = (unsigned long *)(data->kernel_buf + INITRD_START_OFFSET); + *loc = data->initrd_load_addr; + + loc = (unsigned long *)(data->kernel_buf + INITRD_SIZE_OFFSET); + *loc = image->initrd_buf_len; + } + + return NULL; +} + +static int kexec_file_update_purgatory(struct kimage *image) +{ + u64 entry, type; + int ret; + + if (image->type == KEXEC_TYPE_CRASH) { + entry = STARTUP_KDUMP_OFFSET; + type = KEXEC_TYPE_CRASH; + } else { + entry = STARTUP_NORMAL_OFFSET; + type = KEXEC_TYPE_DEFAULT; + } + + ret = kexec_purgatory_get_set_symbol(image, "kernel_entry", &entry, + sizeof(entry), false); + if (ret) + return ret; + + ret = kexec_purgatory_get_set_symbol(image, "kernel_type", &type, + sizeof(type), false); + if (ret) + return ret; + + if (image->type == KEXEC_TYPE_CRASH) { + u64 crash_size; + + ret = kexec_purgatory_get_set_symbol(image, "crash_start", + &crashk_res.start, + sizeof(crashk_res.start), + false); + if (ret) + return ret; + + crash_size = crashk_res.end - crashk_res.start + 1; + ret = kexec_purgatory_get_set_symbol(image, "crash_size", + &crash_size, + sizeof(crash_size), + false); + } + return ret; +} + +int kexec_file_add_purgatory(struct kimage *image, struct s390_load_data *data) +{ + struct kexec_buf buf; + int ret; + + buf.image = image; + + data->memsz = ALIGN(data->memsz, PAGE_SIZE); + buf.mem = data->memsz; + if (image->type == KEXEC_TYPE_CRASH) + buf.mem += crashk_res.start; + + ret = kexec_load_purgatory(image, &buf); + if (ret) + return ret; + + ret = kexec_file_update_purgatory(image); + return ret; +} + +int kexec_file_add_initrd(struct kimage *image, struct s390_load_data *data, + char *initrd, unsigned long initrd_len) +{ + struct kexec_buf buf; + int ret; + + buf.image = image; + + buf.buffer = initrd; + buf.bufsz = initrd_len; + + data->memsz = ALIGN(data->memsz, PAGE_SIZE); + buf.mem = data->memsz; + if (image->type == KEXEC_TYPE_CRASH) + buf.mem += crashk_res.start; + buf.memsz = buf.bufsz; + + data->initrd_load_addr = buf.mem; + data->memsz += buf.memsz; + + ret = kexec_add_buffer(&buf); + return ret; +} + +/* + * The kernel is loaded to a fixed location. Turn off kexec_locate_mem_hole + * and provide kbuf->mem by hand. 
+ */ +int arch_kexec_walk_mem(struct kexec_buf *kbuf, + int (*func)(struct resource *, void *)) +{ + return 1; +} + +int arch_kexec_apply_relocations_add(struct purgatory_info *pi, + Elf_Shdr *section, + const Elf_Shdr *relsec, + const Elf_Shdr *symtab) +{ + Elf_Rela *relas; + int i; + + relas = (void *)pi->ehdr + relsec->sh_offset; + + for (i = 0; i < relsec->sh_size / sizeof(*relas); i++) { + const Elf_Sym *sym; /* symbol to relocate */ + unsigned long addr; /* final location after relocation */ + unsigned long val; /* relocated symbol value */ + void *loc; /* tmp location to modify */ + + sym = (void *)pi->ehdr + symtab->sh_offset; + sym += ELF64_R_SYM(relas[i].r_info); + + if (sym->st_shndx == SHN_UNDEF) + return -ENOEXEC; + + if (sym->st_shndx == SHN_COMMON) + return -ENOEXEC; + + if (sym->st_shndx >= pi->ehdr->e_shnum && + sym->st_shndx != SHN_ABS) + return -ENOEXEC; + + loc = pi->purgatory_buf; + loc += section->sh_offset; + loc += relas[i].r_offset; + + val = sym->st_value; + if (sym->st_shndx != SHN_ABS) + val += pi->sechdrs[sym->st_shndx].sh_addr; + val += relas[i].r_addend; + + addr = section->sh_addr + relas[i].r_offset; + + switch (ELF64_R_TYPE(relas[i].r_info)) { + case R_390_8: /* Direct 8 bit. */ + *(u8 *)loc = val; + break; + case R_390_12: /* Direct 12 bit. */ + *(u16 *)loc &= 0xf000; + *(u16 *)loc |= val & 0xfff; + break; + case R_390_16: /* Direct 16 bit. */ + *(u16 *)loc = val; + break; + case R_390_20: /* Direct 20 bit. */ + *(u32 *)loc &= 0xf00000ff; + *(u32 *)loc |= (val & 0xfff) << 16; /* DL */ + *(u32 *)loc |= (val & 0xff000) >> 4; /* DH */ + break; + case R_390_32: /* Direct 32 bit. */ + *(u32 *)loc = val; + break; + case R_390_64: /* Direct 64 bit. */ + *(u64 *)loc = val; + break; + case R_390_PC16: /* PC relative 16 bit. */ + *(u16 *)loc = (val - addr); + break; + case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */ + *(u16 *)loc = (val - addr) >> 1; + break; + case R_390_PC32DBL: /* PC relative 32 bit shifted by 1. */ + *(u32 *)loc = (val - addr) >> 1; + break; + case R_390_PC32: /* PC relative 32 bit. */ + *(u32 *)loc = (val - addr); + break; + case R_390_PC64: /* PC relative 64 bit. */ + *(u64 *)loc = (val - addr); + break; + default: + break; + } + } + return 0; +} + +int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, + unsigned long buf_len) +{ + /* A kernel must be at least large enough to contain head.S. During + * load memory in head.S will be accessed, e.g. to register the next + * command line. If the next kernel were smaller the current kernel + * will panic at load. 
+ * + * 0x11000 = sizeof(head.S) + */ + if (buf_len < 0x11000) + return -ENOEXEC; + + return kexec_image_probe_default(image, buf, buf_len); +} diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 5a83be955c70..0dc8ac8548ee 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -465,11 +465,11 @@ int module_finalize(const Elf_Ehdr *hdr, apply_alternatives(aseg, aseg + s->sh_size); if (IS_ENABLED(CONFIG_EXPOLINE) && - (!strcmp(".nospec_call_table", secname))) + (!strncmp(".s390_indirect", secname, 14))) nospec_revert(aseg, aseg + s->sh_size); if (IS_ENABLED(CONFIG_EXPOLINE) && - (!strcmp(".nospec_return_table", secname))) + (!strncmp(".s390_return", secname, 12))) nospec_revert(aseg, aseg + s->sh_size); } diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c index f236ce8757e8..46d49a11663f 100644 --- a/arch/s390/kernel/nospec-branch.c +++ b/arch/s390/kernel/nospec-branch.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/module.h> #include <linux/device.h> +#include <linux/cpu.h> #include <asm/nospec-branch.h> static int __init nobp_setup_early(char *str) diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c index c5bc3f209652..feebb2944882 100644 --- a/arch/s390/kernel/perf_cpum_cf_events.c +++ b/arch/s390/kernel/perf_cpum_cf_events.c @@ -123,7 +123,7 @@ CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES_IV, 0x00a1); CPUMF_EVENT_ATTR(cf_zec12, TX_NC_TABORT, 0x00b1); CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_NO_SPECIAL, 0x00b2); CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_SPECIAL, 0x00b3); -CPUMF_EVENT_ATTR(cf_z13, L1D_WRITES_RO_EXCL, 0x0080); +CPUMF_EVENT_ATTR(cf_z13, L1D_RO_EXCL_WRITES, 0x0080); CPUMF_EVENT_ATTR(cf_z13, DTLB1_WRITES, 0x0081); CPUMF_EVENT_ATTR(cf_z13, DTLB1_MISSES, 0x0082); CPUMF_EVENT_ATTR(cf_z13, DTLB1_HPAGE_WRITES, 0x0083); @@ -179,7 +179,7 @@ CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_NO_SPECIAL, 0x00db); CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_SPECIAL, 0x00dc); CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0); CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1); -CPUMF_EVENT_ATTR(cf_z14, L1D_WRITES_RO_EXCL, 0x0080); +CPUMF_EVENT_ATTR(cf_z14, L1D_RO_EXCL_WRITES, 0x0080); CPUMF_EVENT_ATTR(cf_z14, DTLB2_WRITES, 0x0081); CPUMF_EVENT_ATTR(cf_z14, DTLB2_MISSES, 0x0082); CPUMF_EVENT_ATTR(cf_z14, DTLB2_HPAGE_WRITES, 0x0083); @@ -371,7 +371,7 @@ static struct attribute *cpumcf_zec12_pmu_event_attr[] __initdata = { }; static struct attribute *cpumcf_z13_pmu_event_attr[] __initdata = { - CPUMF_EVENT_PTR(cf_z13, L1D_WRITES_RO_EXCL), + CPUMF_EVENT_PTR(cf_z13, L1D_RO_EXCL_WRITES), CPUMF_EVENT_PTR(cf_z13, DTLB1_WRITES), CPUMF_EVENT_PTR(cf_z13, DTLB1_MISSES), CPUMF_EVENT_PTR(cf_z13, DTLB1_HPAGE_WRITES), @@ -431,7 +431,7 @@ static struct attribute *cpumcf_z13_pmu_event_attr[] __initdata = { }; static struct attribute *cpumcf_z14_pmu_event_attr[] __initdata = { - CPUMF_EVENT_PTR(cf_z14, L1D_WRITES_RO_EXCL), + CPUMF_EVENT_PTR(cf_z14, L1D_RO_EXCL_WRITES), CPUMF_EVENT_PTR(cf_z14, DTLB2_WRITES), CPUMF_EVENT_PTR(cf_z14, DTLB2_MISSES), CPUMF_EVENT_PTR(cf_z14, DTLB2_HPAGE_WRITES), @@ -583,6 +583,7 @@ __init const struct attribute_group **cpumf_cf_event_group(void) model = cpumcf_z13_pmu_event_attr; break; case 0x3906: + case 0x3907: model = cpumcf_z14_pmu_event_attr; break; default: diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 70576a2f69cf..6e758bb6cd29 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c 
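For context on the kexec_file_load plumbing added in the s390 files above (and the syscall table entry that follows below), here is a hedged userspace sketch of how the system call is driven: it takes open file descriptors for the kernel image and the initramfs plus a NUL-terminated command line, which is exactly the data the loaders and kexec_file_update_kernel() consume. The image paths and command line are placeholders, and the sketch assumes your libc headers already define SYS_kexec_file_load.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/kexec.h>	/* KEXEC_FILE_ON_CRASH and friends */

int main(void)
{
	/* Placeholder paths; point these at the images you want to load. */
	int kernel_fd = open("/boot/vmlinuz", O_RDONLY);
	int initrd_fd = open("/boot/initrd", O_RDONLY);
	const char *cmdline = "root=/dev/dasda1";

	if (kernel_fd < 0 || initrd_fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * cmdline_len must include the terminating NUL. Pass
	 * KEXEC_FILE_ON_CRASH as the flags argument to load a crash
	 * kernel instead of a normal one.
	 */
	if (syscall(SYS_kexec_file_load, kernel_fd, initrd_fd,
		    strlen(cmdline) + 1, cmdline, 0UL) < 0) {
		perror("kexec_file_load");
		return 1;
	}

	return 0;	/* the loaded kernel is started later, e.g. by "kexec -e" */
}

Recent kexec-tools can exercise the same path with kexec -s -l <kernel> --initrd=<initrd> --command-line=<args>, where -s selects this file-based system call instead of the older kexec_load.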
@@ -29,6 +29,7 @@ #include <linux/random.h> #include <linux/export.h> #include <linux/init_task.h> +#include <asm/cpu_mf.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/vtimer.h> @@ -48,6 +49,15 @@ void flush_thread(void) { } +void arch_setup_new_exec(void) +{ + if (S390_lowcore.current_pid != current->pid) { + S390_lowcore.current_pid = current->pid; + if (test_facility(40)) + lpp(&S390_lowcore.lpp); + } +} + void arch_release_task_struct(struct task_struct *tsk) { runtime_instr_release(tsk); diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index fc3b4aa185cc..d82a9ec64ea9 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -821,6 +821,7 @@ static int __init setup_hwcaps(void) strcpy(elf_platform, "z13"); break; case 0x3906: + case 0x3907: strcpy(elf_platform, "z14"); break; } diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl index b38d48464368..8b210ead7956 100644 --- a/arch/s390/kernel/syscalls/syscall.tbl +++ b/arch/s390/kernel/syscalls/syscall.tbl @@ -388,3 +388,4 @@ 378 common s390_guarded_storage sys_s390_guarded_storage compat_sys_s390_guarded_storage 379 common statx sys_statx compat_sys_statx 380 common s390_sthyi sys_s390_sthyi compat_sys_s390_sthyi +381 common kexec_file_load sys_kexec_file_load compat_sys_kexec_file_load diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c index d9d1f512f019..5007fac01bb5 100644 --- a/arch/s390/kernel/uprobes.c +++ b/arch/s390/kernel/uprobes.c @@ -150,6 +150,15 @@ unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline, return orig; } +bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, + struct pt_regs *regs) +{ + if (ctx == RP_CHECK_CHAIN_CALL) + return user_stack_pointer(regs) <= ret->stack; + else + return user_stack_pointer(regs) < ret->stack; +} + /* Instruction Emulation */ static void adjust_psw_addr(psw_t *psw, unsigned long len) diff --git a/arch/s390/purgatory/.gitignore b/arch/s390/purgatory/.gitignore new file mode 100644 index 000000000000..e9e66f178a6d --- /dev/null +++ b/arch/s390/purgatory/.gitignore @@ -0,0 +1,2 @@ +kexec-purgatory.c +purgatory.ro diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile new file mode 100644 index 000000000000..e9525bc1b4a6 --- /dev/null +++ b/arch/s390/purgatory/Makefile @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: GPL-2.0 + +OBJECT_FILES_NON_STANDARD := y + +purgatory-y := head.o purgatory.o string.o sha256.o mem.o + +targets += $(purgatory-y) purgatory.ro kexec-purgatory.c +PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y)) + +$(obj)/sha256.o: $(srctree)/lib/sha256.c + $(call if_changed_rule,cc_o_c) + +$(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S + $(call if_changed_rule,as_o_S) + +$(obj)/string.o: $(srctree)/arch/s390/lib/string.c + $(call if_changed_rule,cc_o_c) + +LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib +LDFLAGS_purgatory.ro += -z nodefaultlib +KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes +KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare +KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding +KBUILD_CFLAGS += -c -MD -Os -m64 +KBUILD_CFLAGS += $(call cc-option,-fno-PIE) + +$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE + $(call if_changed,ld) + +CMD_BIN2C = $(objtree)/scripts/basic/bin2c +quiet_cmd_bin2c = BIN2C $@ + cmd_bin2c = $(CMD_BIN2C) kexec_purgatory < $< > $@ + +$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE + $(call if_changed,bin2c) + 
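[Editor's aside] The purgatory Makefile above pipes purgatory.ro through scripts/basic/bin2c to produce kexec-purgatory.c, i.e. the linked purgatory blob is embedded into the kernel as a C array named kexec_purgatory. A rough userspace sketch of what such a bin2c-style step emits; this is not the kernel's bin2c, only an illustration of the idea:

/* read a binary on stdin, emit a C array plus a size symbol */
#include <stdio.h>

int main(void)
{
	int ch;
	long total = 0;

	printf("const char kexec_purgatory[] =\n\t\"");
	while ((ch = getchar()) != EOF) {
		printf("\\x%02x", (unsigned)ch);
		if (++total % 16 == 0)
			printf("\"\n\t\"");	/* wrap the string literal */
	}
	printf("\";\n");
	printf("const unsigned long kexec_purgatory_size = %ld;\n", total);
	return 0;
}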
+obj-$(CONFIG_ARCH_HAS_KEXEC_PURGATORY) += kexec-purgatory.o diff --git a/arch/s390/purgatory/head.S b/arch/s390/purgatory/head.S new file mode 100644 index 000000000000..660c96a05a9b --- /dev/null +++ b/arch/s390/purgatory/head.S @@ -0,0 +1,279 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Purgatory setup code + * + * Copyright IBM Corp. 2018 + * + * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> + */ + +#include <linux/linkage.h> +#include <asm/asm-offsets.h> +#include <asm/page.h> +#include <asm/sigp.h> + +/* The purgatory is the code running between two kernels. It's main purpose + * is to verify that the next kernel was not corrupted after load and to + * start it. + * + * If the next kernel is a crash kernel there are some peculiarities to + * consider: + * + * First the purgatory is called twice. Once only to verify the + * sha digest. So if the crash kernel got corrupted the old kernel can try + * to trigger a stand-alone dumper. And once to actually load the crash kernel. + * + * Second the purgatory also has to swap the crash memory region with its + * destination at address 0. As the purgatory is part of crash memory this + * requires some finesse. The tactic here is that the purgatory first copies + * itself to the end of the destination and then swaps the rest of the + * memory running from there. + */ + +#define bufsz purgatory_end-stack + +.macro MEMCPY dst,src,len + lgr %r0,\dst + lgr %r1,\len + lgr %r2,\src + lgr %r3,\len + +20: mvcle %r0,%r2,0 + jo 20b +.endm + +.macro MEMSWAP dst,src,buf,len +10: cghi \len,bufsz + jh 11f + lgr %r4,\len + j 12f +11: lghi %r4,bufsz + +12: MEMCPY \buf,\dst,%r4 + MEMCPY \dst,\src,%r4 + MEMCPY \src,\buf,%r4 + + agr \dst,%r4 + agr \src,%r4 + sgr \len,%r4 + + cghi \len,0 + jh 10b +.endm + +.macro START_NEXT_KERNEL base + lg %r4,kernel_entry-\base(%r13) + lg %r5,load_psw_mask-\base(%r13) + ogr %r4,%r5 + stg %r4,0(%r0) + + xgr %r0,%r0 + diag %r0,%r0,0x308 +.endm + +.text +.align PAGE_SIZE +ENTRY(purgatory_start) + /* The purgatory might be called after a diag308 so better set + * architecture and addressing mode. + */ + lhi %r1,1 + sigp %r1,%r0,SIGP_SET_ARCHITECTURE + sam64 + + larl %r5,gprregs + stmg %r6,%r15,0(%r5) + + basr %r13,0 +.base_crash: + + /* Setup stack */ + larl %r15,purgatory_end + aghi %r15,-160 + + /* If the next kernel is KEXEC_TYPE_CRASH the purgatory is called + * directly with a flag passed in %r2 whether the purgatory shall do + * checksum verification only (%r2 = 0 -> verification only). + * + * Check now and preserve over C function call by storing in + * %r10 whith + * 1 -> checksum verification only + * 0 -> load new kernel + */ + lghi %r10,0 + lg %r11,kernel_type-.base_crash(%r13) + cghi %r11,1 /* KEXEC_TYPE_CRASH */ + jne .do_checksum_verification + cghi %r2,0 /* checksum verification only */ + jne .do_checksum_verification + lghi %r10,1 + +.do_checksum_verification: + brasl %r14,verify_sha256_digest + + cghi %r10,1 /* checksum verification only */ + je .return_old_kernel + cghi %r2,0 /* checksum match */ + jne .disabled_wait + + /* If the next kernel is a crash kernel the purgatory has to swap + * the mem regions first. 
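[Editor's aside] The MEMSWAP macro in head.S above exchanges two memory ranges chunk by chunk through a small bounce buffer (the purgatory reuses its own stack as that buffer). A C model of the same idea, with sizes and contents invented for the example:

#include <stdio.h>
#include <string.h>

/* exchange dst and src, bufsz bytes at a time, through buf */
static void mem_swap(void *dst, void *src, size_t len, void *buf, size_t bufsz)
{
	unsigned char *d = dst, *s = src;

	while (len) {
		size_t chunk = len < bufsz ? len : bufsz;

		memcpy(buf, d, chunk);	/* dst -> buf */
		memcpy(d, s, chunk);	/* src -> dst */
		memcpy(s, buf, chunk);	/* buf -> src */

		d += chunk;
		s += chunk;
		len -= chunk;
	}
}

int main(void)
{
	char a[32] = "old kernel image";
	char b[32] = "crash kernel image";
	char bounce[8];

	mem_swap(a, b, sizeof(a), bounce, sizeof(bounce));
	printf("%s\n%s\n", a, b);
	return 0;
}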
+ */ + cghi %r11,1 /* KEXEC_TYPE_CRASH */ + je .start_crash_kernel + + /* start normal kernel */ + START_NEXT_KERNEL .base_crash + +.return_old_kernel: + lmg %r6,%r15,gprregs-.base_crash(%r13) + br %r14 + +.disabled_wait: + lpswe disabled_wait_psw-.base_crash(%r13) + +.start_crash_kernel: + /* Location of purgatory_start in crash memory */ + lgr %r8,%r13 + aghi %r8,-(.base_crash-purgatory_start) + + /* Destination for this code i.e. end of memory to be swapped. */ + lg %r9,crash_size-.base_crash(%r13) + aghi %r9,-(purgatory_end-purgatory_start) + + /* Destination in crash memory, i.e. same as r9 but in crash memory. */ + lg %r10,crash_start-.base_crash(%r13) + agr %r10,%r9 + + /* Buffer location (in crash memory) and size. As the purgatory is + * behind the point of no return it can re-use the stack as buffer. + */ + lghi %r11,bufsz + larl %r12,stack + + MEMCPY %r12,%r9,%r11 /* dst -> (crash) buf */ + MEMCPY %r9,%r8,%r11 /* self -> dst */ + + /* Jump to new location. */ + lgr %r7,%r9 + aghi %r7,.jump_to_dst-purgatory_start + br %r7 + +.jump_to_dst: + basr %r13,0 +.base_dst: + + /* clear buffer */ + MEMCPY %r12,%r10,%r11 /* (crash) buf -> (crash) dst */ + + /* Load new buffer location after jump */ + larl %r7,stack + aghi %r10,stack-purgatory_start + MEMCPY %r10,%r7,%r11 /* (new) buf -> (crash) buf */ + + /* Now the code is set up to run from its designated location. Start + * swapping the rest of crash memory now. + * + * The registers will be used as follow: + * + * %r0-%r4 reserved for macros defined above + * %r5-%r6 tmp registers + * %r7 pointer to current struct sha region + * %r8 index to iterate over all sha regions + * %r9 pointer in crash memory + * %r10 pointer in old kernel + * %r11 total size (still) to be moved + * %r12 pointer to buffer + */ + lgr %r12,%r7 + lgr %r11,%r9 + lghi %r10,0 + lg %r9,crash_start-.base_dst(%r13) + lghi %r8,16 /* KEXEC_SEGMENTS_MAX */ + larl %r7,purgatory_sha_regions + + j .loop_first + + /* Loop over all purgatory_sha_regions. */ +.loop_next: + aghi %r8,-1 + cghi %r8,0 + je .loop_out + + aghi %r7,__KEXEC_SHA_REGION_SIZE + +.loop_first: + lg %r5,__KEXEC_SHA_REGION_START(%r7) + cghi %r5,0 + je .loop_next + + /* Copy [end last sha region, start current sha region) */ + /* Note: kexec_sha_region->start points in crash memory */ + sgr %r5,%r9 + MEMCPY %r9,%r10,%r5 + + agr %r9,%r5 + agr %r10,%r5 + sgr %r11,%r5 + + /* Swap sha region */ + lg %r6,__KEXEC_SHA_REGION_LEN(%r7) + MEMSWAP %r9,%r10,%r12,%r6 + sg %r11,__KEXEC_SHA_REGION_LEN(%r7) + j .loop_next + +.loop_out: + /* Copy rest of crash memory */ + MEMCPY %r9,%r10,%r11 + + /* start crash kernel */ + START_NEXT_KERNEL .base_dst + + +load_psw_mask: + .long 0x00080000,0x80000000 + + .align 8 +disabled_wait_psw: + .quad 0x0002000180000000 + .quad 0x0000000000000000 + .do_checksum_verification + +gprregs: + .rept 10 + .quad 0 + .endr + +purgatory_sha256_digest: + .global purgatory_sha256_digest + .rept 32 /* SHA256_DIGEST_SIZE */ + .byte 0 + .endr + +purgatory_sha_regions: + .global purgatory_sha_regions + .rept 16 * __KEXEC_SHA_REGION_SIZE /* KEXEC_SEGMENTS_MAX */ + .byte 0 + .endr + +kernel_entry: + .global kernel_entry + .quad 0 + +kernel_type: + .global kernel_type + .quad 0 + +crash_start: + .global crash_start + .quad 0 + +crash_size: + .global crash_size + .quad 0 + + .align PAGE_SIZE +stack: + /* The buffer to move this code must be as big as the code. 
*/ + .skip stack-purgatory_start + .align PAGE_SIZE +purgatory_end: diff --git a/arch/s390/purgatory/purgatory.c b/arch/s390/purgatory/purgatory.c new file mode 100644 index 000000000000..4e2beb3c29b7 --- /dev/null +++ b/arch/s390/purgatory/purgatory.c @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Purgatory code running between two kernels. + * + * Copyright IBM Corp. 2018 + * + * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> + */ + +#include <linux/kexec.h> +#include <linux/sha256.h> +#include <linux/string.h> +#include <asm/purgatory.h> + +struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX]; +u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE]; + +u64 kernel_entry; +u64 kernel_type; + +u64 crash_start; +u64 crash_size; + +int verify_sha256_digest(void) +{ + struct kexec_sha_region *ptr, *end; + u8 digest[SHA256_DIGEST_SIZE]; + struct sha256_state sctx; + + sha256_init(&sctx); + end = purgatory_sha_regions + ARRAY_SIZE(purgatory_sha_regions); + + for (ptr = purgatory_sha_regions; ptr < end; ptr++) + sha256_update(&sctx, (uint8_t *)(ptr->start), ptr->len); + + sha256_final(&sctx, digest); + + if (memcmp(digest, purgatory_sha256_digest, sizeof(digest))) + return 1; + + return 0; +} diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 97fe29316476..1851eaeee131 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -9,6 +9,7 @@ config SUPERH select HAVE_IDE if HAS_IOPORT_MAP select HAVE_MEMBLOCK select HAVE_MEMBLOCK_NODE_MAP + select NO_BOOTMEM select ARCH_DISCARD_MEMBLOCK select HAVE_OPROFILE select HAVE_GENERIC_DMA_COHERENT diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c index 4205f6d42b69..a5bd03642678 100644 --- a/arch/sh/kernel/cpu/sh2/probe.c +++ b/arch/sh/kernel/cpu/sh2/probe.c @@ -43,7 +43,11 @@ void __ref cpu_probe(void) #endif #if defined(CONFIG_CPU_J2) +#if defined(CONFIG_SMP) unsigned cpu = hard_smp_processor_id(); +#else + unsigned cpu = 0; +#endif if (cpu == 0) of_scan_flat_dt(scan_cache, NULL); if (j2_ccr_base) __raw_writel(0x80000303, j2_ccr_base + 4*cpu); if (cpu != 0) return; diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index d34e998b809f..c286cf5da6e7 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -11,7 +11,6 @@ #include <linux/ioport.h> #include <linux/init.h> #include <linux/initrd.h> -#include <linux/bootmem.h> #include <linux/console.h> #include <linux/root_dev.h> #include <linux/utsname.h> diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c index 8ce98691d822..f1b44697ad68 100644 --- a/arch/sh/mm/consistent.c +++ b/arch/sh/mm/consistent.c @@ -59,7 +59,9 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size, split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order); - *dma_handle = virt_to_phys(ret) - PFN_PHYS(dev->dma_pfn_offset); + *dma_handle = virt_to_phys(ret); + if (!WARN_ON(!dev)) + *dma_handle -= PFN_PHYS(dev->dma_pfn_offset); return ret_nocache; } @@ -69,9 +71,12 @@ void dma_generic_free_coherent(struct device *dev, size_t size, unsigned long attrs) { int order = get_order(size); - unsigned long pfn = (dma_handle >> PAGE_SHIFT) + dev->dma_pfn_offset; + unsigned long pfn = dma_handle >> PAGE_SHIFT; int k; + if (!WARN_ON(!dev)) + pfn += dev->dma_pfn_offset; + for (k = 0; k < (1 << order); k++) __free_pages(pfn_to_page(pfn + k), 0); @@ -143,7 +148,7 @@ int __init platform_resource_setup_memory(struct platform_device *pdev, if (!memsize) return 0; - buf = dma_alloc_coherent(NULL, memsize, &dma_handle, GFP_KERNEL); + buf = 
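[Editor's aside] verify_sha256_digest() in purgatory.c above walks the purgatory_sha_regions array, hashes each region and compares the result with the digest recorded at load time. A self-contained userspace sketch of that loop structure; the real code uses the kernel's lib/sha256, here a trivial XOR "digest" stands in and the region contents are invented:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct sha_region {
	const void *start;	/* in the kernel: address of a loaded segment */
	size_t len;
};

/* stand-in for sha256_init/update/final: one running XOR byte */
static void fake_digest(const struct sha_region *regs, size_t n, uint8_t *out)
{
	uint8_t acc = 0;

	for (size_t i = 0; i < n; i++) {
		const uint8_t *p = regs[i].start;

		for (size_t j = 0; j < regs[i].len; j++)
			acc ^= p[j];
	}
	*out = acc;
}

int main(void)
{
	static const char seg0[] = "kernel image";
	static const char seg1[] = "initrd";
	struct sha_region regions[] = {
		{ seg0, sizeof(seg0) },
		{ seg1, sizeof(seg1) },
	};
	uint8_t expected, found;

	fake_digest(regions, 2, &expected);	/* recorded at kexec_file_load time */
	fake_digest(regions, 2, &found);	/* recomputed by the purgatory */

	puts(memcmp(&found, &expected, sizeof(found)) ? "corrupted" : "ok");
	return 0;
}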
dma_alloc_coherent(&pdev->dev, memsize, &dma_handle, GFP_KERNEL); if (!buf) { pr_warning("%s: unable to allocate memory\n", name); return -ENOMEM; diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index ce0bbaa7e404..4034035fbede 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -211,59 +211,15 @@ void __init allocate_pgdat(unsigned int nid) NODE_DATA(nid) = __va(phys); memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); - - NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; #endif NODE_DATA(nid)->node_start_pfn = start_pfn; NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; } -static void __init bootmem_init_one_node(unsigned int nid) -{ - unsigned long total_pages, paddr; - unsigned long end_pfn; - struct pglist_data *p; - - p = NODE_DATA(nid); - - /* Nothing to do.. */ - if (!p->node_spanned_pages) - return; - - end_pfn = pgdat_end_pfn(p); - - total_pages = bootmem_bootmap_pages(p->node_spanned_pages); - - paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE); - if (!paddr) - panic("Can't allocate bootmap for nid[%d]\n", nid); - - init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn); - - free_bootmem_with_active_regions(nid, end_pfn); - - /* - * XXX Handle initial reservations for the system memory node - * only for the moment, we'll refactor this later for handling - * reservations in other nodes. - */ - if (nid == 0) { - struct memblock_region *reg; - - /* Reserve the sections we're already using. */ - for_each_memblock(reserved, reg) { - reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); - } - } - - sparse_memory_present_with_active_regions(nid); -} - static void __init do_init_bootmem(void) { struct memblock_region *reg; - int i; /* Add active regions with valid PFNs. */ for_each_memblock(memory, reg) { @@ -279,9 +235,12 @@ static void __init do_init_bootmem(void) plat_mem_setup(); - for_each_online_node(i) - bootmem_init_one_node(i); + for_each_memblock(memory, reg) { + int nid = memblock_get_region_node(reg); + memory_present(nid, memblock_region_memory_base_pfn(reg), + memblock_region_memory_end_pfn(reg)); + } sparse_init(); } @@ -322,7 +281,6 @@ void __init paging_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES]; unsigned long vaddr, end; - int nid; sh_mv.mv_mem_init(); @@ -377,21 +335,7 @@ void __init paging_init(void) kmap_coherent_init(); memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); - - for_each_online_node(nid) { - pg_data_t *pgdat = NODE_DATA(nid); - unsigned long low, start_pfn; - - start_pfn = pgdat->bdata->node_min_pfn; - low = pgdat->bdata->node_low_pfn; - - if (max_zone_pfns[ZONE_NORMAL] < low) - max_zone_pfns[ZONE_NORMAL] = low; - - printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n", - nid, start_pfn, low); - } - + max_zone_pfns[ZONE_NORMAL] = max_low_pfn; free_area_init_nodes(max_zone_pfns); } diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c index 05713d190247..830e8b3684e4 100644 --- a/arch/sh/mm/numa.c +++ b/arch/sh/mm/numa.c @@ -8,7 +8,6 @@ * for more details. 
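[Editor's aside] The sh/mm/consistent.c change above computes the DMA handle as the physical address minus a per-device PFN offset, and only applies the offset when a device was actually passed in (the old code dereferenced a possibly NULL dev). A sketch of that arithmetic; PAGE_SHIFT and the sample numbers are assumptions for the illustration:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PFN_PHYS(pfn)	((uint64_t)(pfn) << PAGE_SHIFT)

struct fake_device {
	uint64_t dma_pfn_offset;
};

static uint64_t phys_to_dma_handle(const struct fake_device *dev, uint64_t phys)
{
	uint64_t handle = phys;

	if (dev)	/* the kernel code WARNs and skips this when dev is NULL */
		handle -= PFN_PHYS(dev->dma_pfn_offset);
	return handle;
}

int main(void)
{
	struct fake_device dev = { .dma_pfn_offset = 0x10000 };	/* 256 MiB */
	uint64_t phys = 0x4C000000;

	printf("with device:    %#llx\n",
	       (unsigned long long)phys_to_dma_handle(&dev, phys));
	printf("without device: %#llx\n",
	       (unsigned long long)phys_to_dma_handle(NULL, phys));
	return 0;
}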
*/ #include <linux/module.h> -#include <linux/bootmem.h> #include <linux/memblock.h> #include <linux/mm.h> #include <linux/numa.h> @@ -26,9 +25,7 @@ EXPORT_SYMBOL_GPL(node_data); */ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end) { - unsigned long bootmap_pages; unsigned long start_pfn, end_pfn; - unsigned long bootmem_paddr; /* Don't allow bogus node assignment */ BUG_ON(nid >= MAX_NUMNODES || nid <= 0); @@ -48,25 +45,9 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end) SMP_CACHE_BYTES, end)); memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); - NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; NODE_DATA(nid)->node_start_pfn = start_pfn; NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; - /* Node-local bootmap */ - bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn); - bootmem_paddr = memblock_alloc_base(bootmap_pages << PAGE_SHIFT, - PAGE_SIZE, end); - init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT, - start_pfn, end_pfn); - - free_bootmem_with_active_regions(nid, end_pfn); - - /* Reserve the pgdat and bootmap space with the bootmem allocator */ - reserve_bootmem_node(NODE_DATA(nid), start_pfn << PAGE_SHIFT, - sizeof(struct pglist_data), BOOTMEM_DEFAULT); - reserve_bootmem_node(NODE_DATA(nid), bootmem_paddr, - bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT); - /* It's up */ node_set_online(nid); diff --git a/arch/sparc/include/uapi/asm/oradax.h b/arch/sparc/include/uapi/asm/oradax.h index 722951908b0a..4f6676fe4bcc 100644 --- a/arch/sparc/include/uapi/asm/oradax.h +++ b/arch/sparc/include/uapi/asm/oradax.h @@ -3,7 +3,7 @@ * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or + * the Free Software Foundation, either version 2 of the License, or * (at your option) any later version. 
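[Editor's aside] The sparc vio.c fix above replaces kfree() with put_device() after a failed device_register(). The point is a general refcounting rule: once an object with a release callback has been handed to the registration code, only dropping the last reference may free it, so the release path runs exactly once. A userspace model of that rule (names invented for the model):

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;
	void (*release)(struct obj *);
};

static void obj_release(struct obj *o)
{
	printf("release: tearing down and freeing object\n");
	free(o);
}

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0)
		o->release(o);
}

static int obj_register(struct obj *o)
{
	(void)o;	/* pretend registration fails after partial setup */
	return -1;
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	o->refcount = 1;
	o->release = obj_release;

	if (obj_register(o) < 0) {
		/* correct: drop the reference and let release run;
		 * a bare free() here would skip the release callback */
		obj_put(o);
	}
	return 0;
}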
* * This program is distributed in the hope that it will be useful, diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c index 1a0fa10cb6b7..32bae68e34c1 100644 --- a/arch/sparc/kernel/vio.c +++ b/arch/sparc/kernel/vio.c @@ -403,7 +403,7 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp, if (err) { printk(KERN_ERR "VIO: Could not register device %s, err=%d\n", dev_name(&vdev->dev), err); - kfree(vdev); + put_device(&vdev->dev); return NULL; } if (vdev->dp) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 00fcf81f2c56..c07f492b871a 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -52,6 +52,7 @@ config X86 select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_FAST_MULTIPLIER + select ARCH_HAS_FILTER_PGPROT select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_KCOV if X86_64 @@ -273,6 +274,9 @@ config ARCH_HAS_CPU_RELAX config ARCH_HAS_CACHE_LINE_SIZE def_bool y +config ARCH_HAS_FILTER_PGPROT + def_bool y + config HAVE_SETUP_PER_CPU_AREA def_bool y diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index 9af927e59d49..9de7f1e1dede 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -84,13 +84,13 @@ ENTRY(entry_SYSENTER_compat) pushq %rdx /* pt_regs->dx */ pushq %rcx /* pt_regs->cx */ pushq $-ENOSYS /* pt_regs->ax */ - pushq $0 /* pt_regs->r8 = 0 */ + pushq %r8 /* pt_regs->r8 */ xorl %r8d, %r8d /* nospec r8 */ - pushq $0 /* pt_regs->r9 = 0 */ + pushq %r9 /* pt_regs->r9 */ xorl %r9d, %r9d /* nospec r9 */ - pushq $0 /* pt_regs->r10 = 0 */ + pushq %r10 /* pt_regs->r10 */ xorl %r10d, %r10d /* nospec r10 */ - pushq $0 /* pt_regs->r11 = 0 */ + pushq %r11 /* pt_regs->r11 */ xorl %r11d, %r11d /* nospec r11 */ pushq %rbx /* pt_regs->rbx */ xorl %ebx, %ebx /* nospec rbx */ diff --git a/arch/x86/entry/vdso/vdso32/vdso-fakesections.c b/arch/x86/entry/vdso/vdso32/vdso-fakesections.c deleted file mode 100644 index 541468e25265..000000000000 --- a/arch/x86/entry/vdso/vdso32/vdso-fakesections.c +++ /dev/null @@ -1 +0,0 @@ -#include "../vdso-fakesections.c" diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index a6006e7bb729..45b2b1c93d04 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -27,6 +27,7 @@ #include <linux/cpu.h> #include <linux/bitops.h> #include <linux/device.h> +#include <linux/nospec.h> #include <asm/apic.h> #include <asm/stacktrace.h> @@ -304,17 +305,20 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event) config = attr->config; - cache_type = (config >> 0) & 0xff; + cache_type = (config >> 0) & 0xff; if (cache_type >= PERF_COUNT_HW_CACHE_MAX) return -EINVAL; + cache_type = array_index_nospec(cache_type, PERF_COUNT_HW_CACHE_MAX); cache_op = (config >> 8) & 0xff; if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) return -EINVAL; + cache_op = array_index_nospec(cache_op, PERF_COUNT_HW_CACHE_OP_MAX); cache_result = (config >> 16) & 0xff; if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) return -EINVAL; + cache_result = array_index_nospec(cache_result, PERF_COUNT_HW_CACHE_RESULT_MAX); val = hw_cache_event_ids[cache_type][cache_op][cache_result]; @@ -421,6 +425,8 @@ int x86_setup_perfctr(struct perf_event *event) if (attr->config >= x86_pmu.max_events) return -EINVAL; + attr->config = array_index_nospec((unsigned long)attr->config, x86_pmu.max_events); + /* * The generic map: */ diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 607bf565a90c..707b2a96e516 100644 --- 
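[Editor's aside] The clamps added to set_ext_hw_attr() and x86_setup_perfctr() above use array_index_nospec(): after the architectural bounds check, the index is masked so a mispredicted path cannot carry an out-of-range value into the table lookup. A userspace sketch of the branchless mask, roughly mirroring the generic C fallback (x86 uses an asm variant); the sample values are invented:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

static unsigned long index_mask_nospec(unsigned long index, unsigned long size)
{
	/* all ones when index < size, zero otherwise, without a branch */
	return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
}

static unsigned long index_nospec(unsigned long index, unsigned long size)
{
	return index & index_mask_nospec(index, size);
}

int main(void)
{
	unsigned long size = 8;
	unsigned long samples[] = { 0, 3, 7, 8, 250 };

	for (int i = 0; i < 5; i++)
		printf("index %3lu -> %lu\n", samples[i],
		       index_nospec(samples[i], size));
	return 0;
}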
a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3339,7 +3339,8 @@ static void intel_pmu_cpu_starting(int cpu) cpuc->lbr_sel = NULL; - flip_smm_bit(&x86_pmu.attr_freeze_on_smi); + if (x86_pmu.version > 1) + flip_smm_bit(&x86_pmu.attr_freeze_on_smi); if (!cpuc->shared_regs) return; @@ -3502,6 +3503,8 @@ static __initconst const struct x86_pmu core_pmu = { .cpu_dying = intel_pmu_cpu_dying, }; +static struct attribute *intel_pmu_attrs[]; + static __initconst const struct x86_pmu intel_pmu = { .name = "Intel", .handle_irq = intel_pmu_handle_irq, @@ -3533,6 +3536,8 @@ static __initconst const struct x86_pmu intel_pmu = { .format_attrs = intel_arch3_formats_attr, .events_sysfs_show = intel_event_sysfs_show, + .attrs = intel_pmu_attrs, + .cpu_prepare = intel_pmu_cpu_prepare, .cpu_starting = intel_pmu_cpu_starting, .cpu_dying = intel_pmu_cpu_dying, @@ -3911,8 +3916,6 @@ __init int intel_pmu_init(void) x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters); - - x86_pmu.attrs = intel_pmu_attrs; /* * Quirk: v2 perfmon does not report fixed-purpose events, so * assume at least 3 events, when not running in a hypervisor: diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index 9aca448bb8e6..9f8084f18d58 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -92,6 +92,7 @@ #include <linux/module.h> #include <linux/slab.h> #include <linux/perf_event.h> +#include <linux/nospec.h> #include <asm/cpu_device_id.h> #include <asm/intel-family.h> #include "../perf_event.h" @@ -302,6 +303,7 @@ static int cstate_pmu_event_init(struct perf_event *event) } else if (event->pmu == &cstate_pkg_pmu) { if (cfg >= PERF_CSTATE_PKG_EVENT_MAX) return -EINVAL; + cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX); if (!pkg_msr[cfg].attr) return -EINVAL; event->hw.event_base = pkg_msr[cfg].msr; diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index c98b943e58b4..77076a102e34 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -3028,10 +3028,27 @@ static struct intel_uncore_type bdx_uncore_cbox = { .format_group = &hswep_uncore_cbox_format_group, }; +static struct intel_uncore_type bdx_uncore_sbox = { + .name = "sbox", + .num_counters = 4, + .num_boxes = 4, + .perf_ctr_bits = 48, + .event_ctl = HSWEP_S0_MSR_PMON_CTL0, + .perf_ctr = HSWEP_S0_MSR_PMON_CTR0, + .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK, + .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL, + .msr_offset = HSWEP_SBOX_MSR_OFFSET, + .ops = &hswep_uncore_sbox_msr_ops, + .format_group = &hswep_uncore_sbox_format_group, +}; + +#define BDX_MSR_UNCORE_SBOX 3 + static struct intel_uncore_type *bdx_msr_uncores[] = { &bdx_uncore_ubox, &bdx_uncore_cbox, &hswep_uncore_pcu, + &bdx_uncore_sbox, NULL, }; @@ -3043,10 +3060,25 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = { void bdx_uncore_cpu_init(void) { + int pkg = topology_phys_to_logical_pkg(0); + if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; uncore_msr_uncores = bdx_msr_uncores; + /* BDX-DE doesn't have SBOX */ + if (boot_cpu_data.x86_model == 86) { + uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL; + /* Detect systems with no SBOXes */ + } else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) { + struct pci_dev *pdev; + u32 capid4; + + pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]; + pci_read_config_dword(pdev, 0x94, &capid4); + if 
(((capid4 >> 6) & 0x3) == 0) + bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL; + } hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints; } @@ -3264,6 +3296,11 @@ static const struct pci_device_id bdx_uncore_pci_ids[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46), .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2), }, + { /* PCU.3 (for Capability registers) */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0), + .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, + HSWEP_PCI_PCU_3), + }, { /* end: all zeroes */ } }; diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c index e7edf19e64c2..b4771a6ddbc1 100644 --- a/arch/x86/events/msr.c +++ b/arch/x86/events/msr.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/perf_event.h> +#include <linux/nospec.h> #include <asm/intel-family.h> enum perf_msr_id { @@ -158,9 +159,6 @@ static int msr_event_init(struct perf_event *event) if (event->attr.type != event->pmu->type) return -ENOENT; - if (cfg >= PERF_MSR_EVENT_MAX) - return -EINVAL; - /* unsupported modes and filters */ if (event->attr.exclude_user || event->attr.exclude_kernel || @@ -171,6 +169,11 @@ static int msr_event_init(struct perf_event *event) event->attr.sample_period) /* no sampling */ return -EINVAL; + if (cfg >= PERF_MSR_EVENT_MAX) + return -EINVAL; + + cfg = array_index_nospec((unsigned long)cfg, PERF_MSR_EVENT_MAX); + if (!msr[cfg].attr) return -EINVAL; diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index 386a6900e206..219faaec51df 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h @@ -136,7 +136,6 @@ #endif #ifndef __ASSEMBLY__ -#ifndef __BPF__ /* * This output constraint should be used for any inline asm which has a "call" * instruction. Otherwise the asm may be inserted before the frame pointer @@ -146,6 +145,5 @@ register unsigned long current_stack_pointer asm(_ASM_SP); #define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer) #endif -#endif #endif /* _ASM_X86_ASM_H */ diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index d554c11e01ff..578793e97431 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -320,6 +320,7 @@ #define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ #define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ #define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ +#define X86_FEATURE_CLDEMOTE (16*32+25) /* CLDEMOTE instruction */ /* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */ #define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */ diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index 09ad88572746..cc8f8fcf9b4a 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h @@ -46,7 +46,21 @@ int ftrace_int3_handler(struct pt_regs *regs); #endif /* CONFIG_FUNCTION_TRACER */ -#if !defined(__ASSEMBLY__) && !defined(COMPILE_OFFSETS) +#ifndef __ASSEMBLY__ + +#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME +static inline bool arch_syscall_match_sym_name(const char *sym, const char *name) +{ + /* + * Compare the symbol name with the system call name. Skip the + * "__x64_sys", "__ia32_sys" or simple "sys" prefix. 
+ */ + return !strcmp(sym + 3, name + 3) || + (!strncmp(sym, "__x64_", 6) && !strcmp(sym + 9, name + 3)) || + (!strncmp(sym, "__ia32_", 7) && !strcmp(sym + 10, name + 3)); +} + +#ifndef COMPILE_OFFSETS #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION) #include <asm/compat.h> @@ -67,6 +81,7 @@ static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs) return false; } #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */ -#endif /* !__ASSEMBLY__ && !COMPILE_OFFSETS */ +#endif /* !COMPILE_OFFSETS */ +#endif /* !__ASSEMBLY__ */ #endif /* _ASM_X86_FTRACE_H */ diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 404c5fdff859..548d90bbf919 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -34,11 +34,6 @@ * (0x80 is the syscall vector, 0x30-0x3f are for ISA) */ #define FIRST_EXTERNAL_VECTOR 0x20 -/* - * We start allocating at 0x21 to spread out vectors evenly between - * priority levels. (0x80 is the syscall vector) - */ -#define VECTOR_OFFSET_START 1 /* * Reserve the lowest usable vector (and hence lowest priority) 0x20 for @@ -119,8 +114,6 @@ #define FIRST_SYSTEM_VECTOR NR_VECTORS #endif -#define FPU_IRQ 13 - /* * Size the maximum number of interrupts. * diff --git a/arch/x86/include/asm/jailhouse_para.h b/arch/x86/include/asm/jailhouse_para.h index b885a961a150..a34897aef2c2 100644 --- a/arch/x86/include/asm/jailhouse_para.h +++ b/arch/x86/include/asm/jailhouse_para.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL2.0 */ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Jailhouse paravirt detection diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 949c977bc4c9..c25775fad4ed 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1013,6 +1013,7 @@ struct kvm_x86_ops { bool (*has_wbinvd_exit)(void); + u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu); void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset); void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2); diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 5f49b4ff0c24..f1633de5a675 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -601,6 +601,11 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) #define canon_pgprot(p) __pgprot(massage_pgprot(p)) +static inline pgprot_t arch_filter_pgprot(pgprot_t prot) +{ + return canon_pgprot(prot); +} + static inline int is_new_memtype_allowed(u64 paddr, unsigned long size, enum page_cache_mode pcm, enum page_cache_mode new_pcm) diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index d5c21a382475..adb47552e6bb 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -105,14 +105,14 @@ extern unsigned int ptrs_per_p4d; #define LDT_PGD_ENTRY (pgtable_l5_enabled ? 
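[Editor's aside] The arch_syscall_match_sym_name() helper above lets ftrace match the tracer's "sys_foo" name against the plain symbol as well as the "__x64_sys_foo" and "__ia32_sys_foo" wrappers. A quick userspace harness exercising the same comparison (symbol names made up for the test):

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

static bool syscall_match_sym_name(const char *sym, const char *name)
{
	/* skip "sys" (3 chars) on both sides, or the full wrapper prefix */
	return !strcmp(sym + 3, name + 3) ||
	       (!strncmp(sym, "__x64_", 6) && !strcmp(sym + 9, name + 3)) ||
	       (!strncmp(sym, "__ia32_", 7) && !strcmp(sym + 10, name + 3));
}

int main(void)
{
	const char *syms[] = {
		"sys_read", "__x64_sys_read", "__ia32_sys_read", "__x64_sys_write",
	};

	for (int i = 0; i < 4; i++)
		printf("%-16s vs sys_read -> %s\n", syms[i],
		       syscall_match_sym_name(syms[i], "sys_read") ? "match" : "no match");
	return 0;
}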
LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4) #define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) -#define __VMALLOC_BASE_L4 0xffffc90000000000 -#define __VMALLOC_BASE_L5 0xffa0000000000000 +#define __VMALLOC_BASE_L4 0xffffc90000000000UL +#define __VMALLOC_BASE_L5 0xffa0000000000000UL #define VMALLOC_SIZE_TB_L4 32UL #define VMALLOC_SIZE_TB_L5 12800UL -#define __VMEMMAP_BASE_L4 0xffffea0000000000 -#define __VMEMMAP_BASE_L5 0xffd4000000000000 +#define __VMEMMAP_BASE_L4 0xffffea0000000000UL +#define __VMEMMAP_BASE_L5 0xffd4000000000000UL #ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT # define VMALLOC_START vmalloc_base diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 4fa4206029e3..21a114914ba4 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -749,13 +749,11 @@ enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT, extern void enable_sep_cpu(void); extern int sysenter_setup(void); -extern void early_trap_init(void); void early_trap_pf_init(void); /* Defined in head.S */ extern struct desc_ptr early_gdt_descr; -extern void cpu_set_gdt(int); extern void switch_to_new_gdt(int); extern void load_direct_gdt(int); extern void load_fixmap_gdt(int); diff --git a/arch/x86/include/uapi/asm/msgbuf.h b/arch/x86/include/uapi/asm/msgbuf.h index 809134c644a6..90ab9a795b49 100644 --- a/arch/x86/include/uapi/asm/msgbuf.h +++ b/arch/x86/include/uapi/asm/msgbuf.h @@ -1 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef __ASM_X64_MSGBUF_H +#define __ASM_X64_MSGBUF_H + +#if !defined(__x86_64__) || !defined(__ILP32__) #include <asm-generic/msgbuf.h> +#else +/* + * The msqid64_ds structure for x86 architecture with x32 ABI. + * + * On x86-32 and x86-64 we can just use the generic definition, but + * x32 uses the same binary layout as x86_64, which is differnet + * from other 32-bit architectures. + */ + +struct msqid64_ds { + struct ipc64_perm msg_perm; + __kernel_time_t msg_stime; /* last msgsnd time */ + __kernel_time_t msg_rtime; /* last msgrcv time */ + __kernel_time_t msg_ctime; /* last change time */ + __kernel_ulong_t msg_cbytes; /* current number of bytes on queue */ + __kernel_ulong_t msg_qnum; /* number of messages in queue */ + __kernel_ulong_t msg_qbytes; /* max number of bytes on queue */ + __kernel_pid_t msg_lspid; /* pid of last msgsnd */ + __kernel_pid_t msg_lrpid; /* last receive pid */ + __kernel_ulong_t __unused4; + __kernel_ulong_t __unused5; +}; + +#endif + +#endif /* __ASM_GENERIC_MSGBUF_H */ diff --git a/arch/x86/include/uapi/asm/shmbuf.h b/arch/x86/include/uapi/asm/shmbuf.h index 83c05fc2de38..644421f3823b 100644 --- a/arch/x86/include/uapi/asm/shmbuf.h +++ b/arch/x86/include/uapi/asm/shmbuf.h @@ -1 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef __ASM_X86_SHMBUF_H +#define __ASM_X86_SHMBUF_H + +#if !defined(__x86_64__) || !defined(__ILP32__) #include <asm-generic/shmbuf.h> +#else +/* + * The shmid64_ds structure for x86 architecture with x32 ABI. + * + * On x86-32 and x86-64 we can just use the generic definition, but + * x32 uses the same binary layout as x86_64, which is differnet + * from other 32-bit architectures. 
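[Editor's aside] The msgbuf.h comment above is about binary layout: on x32 the __kernel_time_t and __kernel_ulong_t members stay 64-bit, so the structure matches x86-64 rather than a generic 32-bit ABI. A simplified stand-in (not the real uapi layout) showing how such layout expectations can be pinned down with static assertions:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct demo_perm {	/* stand-in for struct ipc64_perm */
	uint32_t key, uid, gid, cuid, cgid, mode, seq, pad;
};

struct demo_msqid64 {	/* stand-in for struct msqid64_ds */
	struct demo_perm msg_perm;
	int64_t  msg_stime;	/* 64-bit even on an ILP32 (x32-like) ABI */
	int64_t  msg_rtime;
	int64_t  msg_ctime;
	uint64_t msg_cbytes;
	uint64_t msg_qnum;
	uint64_t msg_qbytes;
	uint32_t msg_lspid;
	uint32_t msg_lrpid;
	uint64_t unused4;
	uint64_t unused5;
};

/* pin the properties the comment relies on */
_Static_assert(sizeof(int64_t) == 8, "time fields must stay 64-bit");
_Static_assert(offsetof(struct demo_msqid64, msg_stime) % 8 == 0,
	       "64-bit members must stay naturally aligned");

int main(void)
{
	printf("sizeof(struct demo_msqid64) = %zu\n", sizeof(struct demo_msqid64));
	return 0;
}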
+ */ + +struct shmid64_ds { + struct ipc64_perm shm_perm; /* operation perms */ + size_t shm_segsz; /* size of segment (bytes) */ + __kernel_time_t shm_atime; /* last attach time */ + __kernel_time_t shm_dtime; /* last detach time */ + __kernel_time_t shm_ctime; /* last change time */ + __kernel_pid_t shm_cpid; /* pid of creator */ + __kernel_pid_t shm_lpid; /* pid of last operator */ + __kernel_ulong_t shm_nattch; /* no. of current attaches */ + __kernel_ulong_t __unused4; + __kernel_ulong_t __unused5; +}; + +struct shminfo64 { + __kernel_ulong_t shmmax; + __kernel_ulong_t shmmin; + __kernel_ulong_t shmmni; + __kernel_ulong_t shmseg; + __kernel_ulong_t shmall; + __kernel_ulong_t __unused1; + __kernel_ulong_t __unused2; + __kernel_ulong_t __unused3; + __kernel_ulong_t __unused4; +}; + +#endif + +#endif /* __ASM_X86_SHMBUF_H */ diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index dde444f932c1..3b20607d581b 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -215,6 +215,10 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end) apic_id = processor->local_apic_id; enabled = processor->lapic_flags & ACPI_MADT_ENABLED; + /* Ignore invalid ID */ + if (apic_id == 0xffffffff) + return 0; + /* * We need to register disabled CPU as well to permit * counting disabled CPUs. This allows us to size diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 8a5b185735e1..ce243f7d2d4e 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -848,6 +848,11 @@ void get_cpu_cap(struct cpuinfo_x86 *c) c->x86_power = edx; } + if (c->extended_cpuid_level >= 0x80000008) { + cpuid(0x80000008, &eax, &ebx, &ecx, &edx); + c->x86_capability[CPUID_8000_0008_EBX] = ebx; + } + if (c->extended_cpuid_level >= 0x8000000a) c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a); @@ -871,7 +876,6 @@ static void get_cpu_address_sizes(struct cpuinfo_x86 *c) c->x86_virt_bits = (eax >> 8) & 0xff; c->x86_phys_bits = eax & 0xff; - c->x86_capability[CPUID_8000_0008_EBX] = ebx; } #ifdef CONFIG_X86_32 else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36)) diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index b9693b80fc21..60d1897041da 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -835,6 +835,9 @@ static const struct _tlb_table intel_tlb_table[] = { { 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" }, { 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" }, { 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" }, + { 0x6b, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 8-way associative" }, + { 0x6c, TLB_DATA_2M_4M, 128, " TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" }, + { 0x6d, TLB_DATA_1G, 16, " TLB_DATA 1 GByte pages, fully associative" }, { 0x76, TLB_INST_2M_4M, 8, " TLB_INST 2-MByte or 4-MByte pages, fully associative" }, { 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" }, { 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" }, diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 10c4fc2c91f8..77e201301528 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -564,14 +564,12 @@ static int __reload_late(void *info) apply_microcode_local(&err); spin_unlock(&update_lock); + /* siblings return UCODE_OK because their engine got updated 
already */ if (err > UCODE_NFOUND) { pr_warn("Error reloading microcode on CPU %d\n", cpu); - return -1; - /* siblings return UCODE_OK because their engine got updated already */ + ret = -1; } else if (err == UCODE_UPDATED || err == UCODE_OK) { ret = 1; - } else { - return ret; } /* diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 32b8e5724f96..1c2cfa0644aa 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -485,7 +485,6 @@ static void show_saved_mc(void) */ static void save_mc_for_early(u8 *mc, unsigned int size) { -#ifdef CONFIG_HOTPLUG_CPU /* Synchronization during CPU hotplug. */ static DEFINE_MUTEX(x86_cpu_microcode_mutex); @@ -495,7 +494,6 @@ static void save_mc_for_early(u8 *mc, unsigned int size) show_saved_mc(); mutex_unlock(&x86_cpu_microcode_mutex); -#endif } static bool load_builtin_intel_microcode(struct cpio_data *cp) diff --git a/arch/x86/kernel/jailhouse.c b/arch/x86/kernel/jailhouse.c index fa183a131edc..a15fe0e92cf9 100644 --- a/arch/x86/kernel/jailhouse.c +++ b/arch/x86/kernel/jailhouse.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL2.0 +// SPDX-License-Identifier: GPL-2.0 /* * Jailhouse paravirt_ops implementation * diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c index 3182908b7e6c..7326078eaa7a 100644 --- a/arch/x86/kernel/kexec-bzimage64.c +++ b/arch/x86/kernel/kexec-bzimage64.c @@ -398,11 +398,10 @@ static void *bzImage64_load(struct kimage *image, char *kernel, * little bit simple */ efi_map_sz = efi_get_runtime_map_size(); - efi_map_sz = ALIGN(efi_map_sz, 16); params_cmdline_sz = sizeof(struct boot_params) + cmdline_len + MAX_ELFCOREHDR_STR_LEN; params_cmdline_sz = ALIGN(params_cmdline_sz, 16); - kbuf.bufsz = params_cmdline_sz + efi_map_sz + + kbuf.bufsz = params_cmdline_sz + ALIGN(efi_map_sz, 16) + sizeof(struct setup_data) + sizeof(struct efi_setup_data); @@ -410,7 +409,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel, if (!params) return ERR_PTR(-ENOMEM); efi_map_offset = params_cmdline_sz; - efi_setup_data_offset = efi_map_offset + efi_map_sz; + efi_setup_data_offset = efi_map_offset + ALIGN(efi_map_sz, 16); /* Copy setup header onto bootparams. Documentation/x86/boot.txt */ setup_header_size = 0x0202 + kernel[0x0201] - setup_hdr_offset; diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index d41d896481b8..c9b14020f4dd 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -166,7 +166,7 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) */ pte_prot = __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL); /* Filter out unsuppored __PAGE_KERNEL* bits: */ - pgprot_val(pte_prot) |= __supported_pte_mask; + pgprot_val(pte_prot) &= __supported_pte_mask; pte = pfn_pte(pfn, pte_prot); set_pte_at(mm, va, ptep, pte); pte_unmap_unlock(ptep, ptl); diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c deleted file mode 100644 index ac7ea3a8242f..000000000000 --- a/arch/x86/kernel/pci-nommu.c +++ /dev/null @@ -1,90 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Fallback functions when the main IOMMU code is not compiled in. This - code is roughly equivalent to i386. 
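[Editor's aside] The ldt.c one-liner above is a classic mask bug: filtering a protection value down to the supported bits needs a bitwise AND with __supported_pte_mask, while the old OR turned extra bits on instead of clearing unsupported ones. A tiny demo of the difference (bit definitions invented for the example):

#include <stdio.h>
#include <stdint.h>

#define PTE_PRESENT	(1ull << 0)
#define PTE_RW		(1ull << 1)
#define PTE_NX		(1ull << 63)	/* pretend NX is not supported here */

int main(void)
{
	uint64_t prot = PTE_PRESENT | PTE_RW | PTE_NX;
	uint64_t supported_mask = PTE_PRESENT | PTE_RW;	/* no NX support */

	printf("prot | mask = %#llx (wrong: unsupported bit survives)\n",
	       (unsigned long long)(prot | supported_mask));
	printf("prot & mask = %#llx (right: unsupported bit filtered out)\n",
	       (unsigned long long)(prot & supported_mask));
	return 0;
}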
*/ -#include <linux/dma-direct.h> -#include <linux/scatterlist.h> -#include <linux/string.h> -#include <linux/gfp.h> -#include <linux/pci.h> -#include <linux/mm.h> - -#include <asm/processor.h> -#include <asm/iommu.h> -#include <asm/dma.h> - -#define NOMMU_MAPPING_ERROR 0 - -static int -check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) -{ - if (hwdev && !dma_capable(hwdev, bus, size)) { - if (*hwdev->dma_mask >= DMA_BIT_MASK(32)) - printk(KERN_ERR - "nommu_%s: overflow %Lx+%zu of device mask %Lx\n", - name, (long long)bus, size, - (long long)*hwdev->dma_mask); - return 0; - } - return 1; -} - -static dma_addr_t nommu_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, - unsigned long attrs) -{ - dma_addr_t bus = phys_to_dma(dev, page_to_phys(page)) + offset; - WARN_ON(size == 0); - if (!check_addr("map_single", dev, bus, size)) - return NOMMU_MAPPING_ERROR; - return bus; -} - -/* Map a set of buffers described by scatterlist in streaming - * mode for DMA. This is the scatter-gather version of the - * above pci_map_single interface. Here the scatter gather list - * elements are each tagged with the appropriate dma address - * and length. They are obtained via sg_dma_{address,length}(SG). - * - * NOTE: An implementation may be able to use a smaller number of - * DMA address/length pairs than there are SG table elements. - * (for example via virtual mapping capabilities) - * The routine returns the number of addr/length pairs actually - * used, at most nents. - * - * Device ownership issues as mentioned above for pci_map_single are - * the same here. - */ -static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, - unsigned long attrs) -{ - struct scatterlist *s; - int i; - - WARN_ON(nents == 0 || sg[0].length == 0); - - for_each_sg(sg, s, nents, i) { - BUG_ON(!sg_page(s)); - s->dma_address = sg_phys(s); - if (!check_addr("map_sg", hwdev, s->dma_address, s->length)) - return 0; - s->dma_length = s->length; - } - return nents; -} - -static int nommu_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return dma_addr == NOMMU_MAPPING_ERROR; -} - -const struct dma_map_ops nommu_dma_ops = { - .alloc = dma_generic_alloc_coherent, - .free = dma_generic_free_coherent, - .map_sg = nommu_map_sg, - .map_page = nommu_map_page, - .is_phys = 1, - .mapping_error = nommu_mapping_error, - .dma_supported = x86_dma_supported, -}; diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 6285697b6e56..5c623dfe39d1 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -50,6 +50,7 @@ #include <linux/init_ohci1394_dma.h> #include <linux/kvm_para.h> #include <linux/dma-contiguous.h> +#include <xen/xen.h> #include <linux/errno.h> #include <linux/kernel.h> @@ -534,6 +535,11 @@ static void __init reserve_crashkernel(void) high = true; } + if (xen_pv_domain()) { + pr_info("Ignoring crashkernel for a Xen PV domain\n"); + return; + } + /* 0 means: find the address automatically */ if (crash_base <= 0) { /* diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index ff99e2b6fc54..0f1cbb042f49 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -77,6 +77,8 @@ #include <asm/i8259.h> #include <asm/misc.h> #include <asm/qspinlock.h> +#include <asm/intel-family.h> +#include <asm/cpu_device_id.h> /* Number of siblings per CPU package */ int smp_num_siblings = 1; @@ -390,15 +392,47 @@ static bool match_smt(struct 
cpuinfo_x86 *c, struct cpuinfo_x86 *o) return false; } +/* + * Define snc_cpu[] for SNC (Sub-NUMA Cluster) CPUs. + * + * These are Intel CPUs that enumerate an LLC that is shared by + * multiple NUMA nodes. The LLC on these systems is shared for + * off-package data access but private to the NUMA node (half + * of the package) for on-package access. + * + * CPUID (the source of the information about the LLC) can only + * enumerate the cache as being shared *or* unshared, but not + * this particular configuration. The CPU in this case enumerates + * the cache to be shared across the entire package (spanning both + * NUMA nodes). + */ + +static const struct x86_cpu_id snc_cpu[] = { + { X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X }, + {} +}; + static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) { int cpu1 = c->cpu_index, cpu2 = o->cpu_index; - if (per_cpu(cpu_llc_id, cpu1) != BAD_APICID && - per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) - return topology_sane(c, o, "llc"); + /* Do not match if we do not have a valid APICID for cpu: */ + if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID) + return false; - return false; + /* Do not match if LLC id does not match: */ + if (per_cpu(cpu_llc_id, cpu1) != per_cpu(cpu_llc_id, cpu2)) + return false; + + /* + * Allow the SNC topology without warning. Return of false + * means 'c' does not share the LLC of 'o'. This will be + * reflected to userspace. + */ + if (!topology_same_node(c, o) && x86_match_cpu(snc_cpu)) + return false; + + return topology_sane(c, o, "llc"); } /* @@ -456,7 +490,8 @@ static struct sched_domain_topology_level x86_topology[] = { /* * Set if a package/die has multiple NUMA nodes inside. - * AMD Magny-Cours and Intel Cluster-on-Die have this. + * AMD Magny-Cours, Intel Cluster-on-Die, and Intel + * Sub-NUMA Clustering have this. 
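[Editor's aside] The reworked match_llc() above only treats two CPUs as LLC siblings when their LLC IDs are valid and equal, and on a Sub-NUMA Cluster part a matching package-wide LLC ID is still not reported as shared across NUMA nodes (and no topology warning is raised). A sketch of that decision flow; all inputs are invented and the final sanity check is only hinted at:

#include <stdio.h>
#include <stdbool.h>

#define BAD_APICID 0xffffu

struct cpu {
	unsigned int llc_id;
	int node;
};

static bool cpus_share_llc(const struct cpu *a, const struct cpu *b, bool snc)
{
	if (a->llc_id == BAD_APICID)
		return false;
	if (a->llc_id != b->llc_id)
		return false;
	if (snc && a->node != b->node)
		return false;	/* SNC: same LLC ID, different NUMA nodes */
	return true;		/* the kernel additionally runs topology_sane() here */
}

int main(void)
{
	struct cpu c0 = { .llc_id = 1, .node = 0 };
	struct cpu c1 = { .llc_id = 1, .node = 1 };

	printf("non-SNC: %d, SNC: %d\n",
	       cpus_share_llc(&c0, &c1, false), cpus_share_llc(&c0, &c1, true));
	return 0;
}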
*/ static bool x86_has_numa_in_package; @@ -1536,6 +1571,8 @@ static inline void mwait_play_dead(void) void *mwait_ptr; int i; + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + return; if (!this_cpu_has(X86_FEATURE_MWAIT)) return; if (!this_cpu_has(X86_FEATURE_CLFLUSH)) diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index ef32297ff17e..74392d9d51e0 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -317,7 +317,7 @@ static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2) hpet2 -= hpet1; tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD)); do_div(tmp, 1000000); - do_div(deltatsc, tmp); + deltatsc = div64_u64(deltatsc, tmp); return (unsigned long) deltatsc; } @@ -1067,6 +1067,7 @@ static struct clocksource clocksource_tsc_early = { .resume = tsc_resume, .mark_unstable = tsc_cs_mark_unstable, .tick_stable = tsc_cs_tick_stable, + .list = LIST_HEAD_INIT(clocksource_tsc_early.list), }; /* @@ -1086,6 +1087,7 @@ static struct clocksource clocksource_tsc = { .resume = tsc_resume, .mark_unstable = tsc_cs_mark_unstable, .tick_stable = tsc_cs_tick_stable, + .list = LIST_HEAD_INIT(clocksource_tsc.list), }; void mark_tsc_unstable(char *reason) @@ -1098,13 +1100,9 @@ void mark_tsc_unstable(char *reason) clear_sched_clock_stable(); disable_sched_clock_irqtime(); pr_info("Marking TSC unstable due to %s\n", reason); - /* Change only the rating, when not registered */ - if (clocksource_tsc.mult) { - clocksource_mark_unstable(&clocksource_tsc); - } else { - clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE; - clocksource_tsc.rating = 0; - } + + clocksource_mark_unstable(&clocksource_tsc_early); + clocksource_mark_unstable(&clocksource_tsc); } EXPORT_SYMBOL_GPL(mark_tsc_unstable); @@ -1244,7 +1242,7 @@ static void tsc_refine_calibration_work(struct work_struct *work) /* Don't bother refining TSC on unstable systems */ if (tsc_unstable) - return; + goto unreg; /* * Since the work is started early in boot, we may be @@ -1297,11 +1295,12 @@ static void tsc_refine_calibration_work(struct work_struct *work) out: if (tsc_unstable) - return; + goto unreg; if (boot_cpu_has(X86_FEATURE_ART)) art_related_clocksource = &clocksource_tsc; clocksource_register_khz(&clocksource_tsc, tsc_khz); +unreg: clocksource_unregister(&clocksource_tsc_early); } @@ -1311,8 +1310,8 @@ static int __init init_tsc_clocksource(void) if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_disabled > 0 || !tsc_khz) return 0; - if (check_tsc_unstable()) - return 0; + if (tsc_unstable) + goto unreg; if (tsc_clocksource_reliable) clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY; @@ -1328,6 +1327,7 @@ static int __init init_tsc_clocksource(void) if (boot_cpu_has(X86_FEATURE_ART)) art_related_clocksource = &clocksource_tsc; clocksource_register_khz(&clocksource_tsc, tsc_khz); +unreg: clocksource_unregister(&clocksource_tsc_early); return 0; } diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 70dcb5548022..b74c9c1405b9 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1463,23 +1463,6 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic) local_irq_restore(flags); } -static void start_sw_period(struct kvm_lapic *apic) -{ - if (!apic->lapic_timer.period) - return; - - if (apic_lvtt_oneshot(apic) && - ktime_after(ktime_get(), - apic->lapic_timer.target_expiration)) { - apic_timer_expired(apic); - return; - } - - hrtimer_start(&apic->lapic_timer.timer, - apic->lapic_timer.target_expiration, - HRTIMER_MODE_ABS_PINNED); -} - static void update_target_expiration(struct kvm_lapic *apic, uint32_t 
old_divisor) { ktime_t now, remaining; @@ -1546,6 +1529,26 @@ static void advance_periodic_target_expiration(struct kvm_lapic *apic) apic->lapic_timer.period); } +static void start_sw_period(struct kvm_lapic *apic) +{ + if (!apic->lapic_timer.period) + return; + + if (ktime_after(ktime_get(), + apic->lapic_timer.target_expiration)) { + apic_timer_expired(apic); + + if (apic_lvtt_oneshot(apic)) + return; + + advance_periodic_target_expiration(apic); + } + + hrtimer_start(&apic->lapic_timer.timer, + apic->lapic_timer.target_expiration, + HRTIMER_MODE_ABS_PINNED); +} + bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu) { if (!lapic_in_kernel(vcpu)) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index b58787daf9f8..1fc05e428aba 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1423,12 +1423,23 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type) seg->base = 0; } +static u64 svm_read_l1_tsc_offset(struct kvm_vcpu *vcpu) +{ + struct vcpu_svm *svm = to_svm(vcpu); + + if (is_guest_mode(vcpu)) + return svm->nested.hsave->control.tsc_offset; + + return vcpu->arch.tsc_offset; +} + static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) { struct vcpu_svm *svm = to_svm(vcpu); u64 g_tsc_offset = 0; if (is_guest_mode(vcpu)) { + /* Write L1's TSC offset. */ g_tsc_offset = svm->vmcb->control.tsc_offset - svm->nested.hsave->control.tsc_offset; svm->nested.hsave->control.tsc_offset = offset; @@ -3322,6 +3333,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm) /* Restore the original control entries */ copy_vmcb_control_area(vmcb, hsave); + svm->vcpu.arch.tsc_offset = svm->vmcb->control.tsc_offset; kvm_clear_exception_queue(&svm->vcpu); kvm_clear_interrupt_queue(&svm->vcpu); @@ -3482,10 +3494,12 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa, /* We don't want to see VMMCALLs from a nested guest */ clr_intercept(svm, INTERCEPT_VMMCALL); + svm->vcpu.arch.tsc_offset += nested_vmcb->control.tsc_offset; + svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset; + svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext; svm->vmcb->control.int_vector = nested_vmcb->control.int_vector; svm->vmcb->control.int_state = nested_vmcb->control.int_state; - svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset; svm->vmcb->control.event_inj = nested_vmcb->control.event_inj; svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err; @@ -4035,12 +4049,6 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) struct vcpu_svm *svm = to_svm(vcpu); switch (msr_info->index) { - case MSR_IA32_TSC: { - msr_info->data = svm->vmcb->control.tsc_offset + - kvm_scale_tsc(vcpu, rdtsc()); - - break; - } case MSR_STAR: msr_info->data = svm->vmcb->save.star; break; @@ -4193,9 +4201,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) svm->vmcb->save.g_pat = data; mark_dirty(svm->vmcb, VMCB_NPT); break; - case MSR_IA32_TSC: - kvm_write_tsc(vcpu, msr); - break; case MSR_IA32_SPEC_CTRL: if (!msr->host_initiated && !guest_cpuid_has(vcpu, X86_FEATURE_IBRS)) @@ -5265,9 +5270,8 @@ static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq, } if (!ret && svm) { - trace_kvm_pi_irte_update(svm->vcpu.vcpu_id, - host_irq, e->gsi, - vcpu_info.vector, + trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id, + e->gsi, vcpu_info.vector, vcpu_info.pi_desc_addr, set); } @@ -7102,6 +7106,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .has_wbinvd_exit = svm_has_wbinvd_exit, + .read_l1_tsc_offset = 
svm_read_l1_tsc_offset, .write_tsc_offset = svm_write_tsc_offset, .set_tdp_cr3 = set_tdp_cr3, diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index aafcc9881e88..c7668806163f 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2880,18 +2880,15 @@ static void setup_msrs(struct vcpu_vmx *vmx) vmx_update_msr_bitmap(&vmx->vcpu); } -/* - * reads and returns guest's timestamp counter "register" - * guest_tsc = (host_tsc * tsc multiplier) >> 48 + tsc_offset - * -- Intel TSC Scaling for Virtualization White Paper, sec 1.3 - */ -static u64 guest_read_tsc(struct kvm_vcpu *vcpu) +static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu) { - u64 host_tsc, tsc_offset; + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - host_tsc = rdtsc(); - tsc_offset = vmcs_read64(TSC_OFFSET); - return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset; + if (is_guest_mode(vcpu) && + (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)) + return vcpu->arch.tsc_offset - vmcs12->tsc_offset; + + return vcpu->arch.tsc_offset; } /* @@ -3524,9 +3521,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) #endif case MSR_EFER: return kvm_get_msr_common(vcpu, msr_info); - case MSR_IA32_TSC: - msr_info->data = guest_read_tsc(vcpu); - break; case MSR_IA32_SPEC_CTRL: if (!msr_info->host_initiated && !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) && @@ -3646,9 +3640,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 1; vmcs_write64(GUEST_BNDCFGS, data); break; - case MSR_IA32_TSC: - kvm_write_tsc(vcpu, msr_info); - break; case MSR_IA32_SPEC_CTRL: if (!msr_info->host_initiated && !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) && @@ -4553,12 +4544,6 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa) __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa); } -static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu) -{ - if (enable_ept) - vmx_flush_tlb(vcpu, true); -} - static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) { ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; @@ -9287,7 +9272,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) } else { sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; - vmx_flush_tlb_ept_only(vcpu); + vmx_flush_tlb(vcpu, true); } vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control); @@ -9315,7 +9300,7 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa) !nested_cpu_has2(get_vmcs12(&vmx->vcpu), SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { vmcs_write64(APIC_ACCESS_ADDR, hpa); - vmx_flush_tlb_ept_only(vcpu); + vmx_flush_tlb(vcpu, true); } } @@ -10608,6 +10593,16 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, return true; } +static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) && + !page_address_valid(vcpu, vmcs12->apic_access_addr)) + return -EINVAL; + else + return 0; +} + static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { @@ -11176,11 +11171,8 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); } - if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) - vmcs_write64(TSC_OFFSET, - vcpu->arch.tsc_offset + vmcs12->tsc_offset); - else - vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); + vmcs_write64(TSC_OFFSET, 
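[Editor's aside] The new read_l1_tsc_offset() hook above exists because, while a nested guest runs, vcpu->arch.tsc_offset holds the combined offset (L1's offset plus the offset L1 programmed for L2); L1's own offset is recovered by subtracting the nested offset again, and entry/exit fold that nested offset in and out. A small arithmetic model of that bookkeeping; the numbers are arbitrary:

#include <stdio.h>
#include <stdint.h>

struct vcpu_model {
	uint64_t tsc_offset;	/* offset currently in effect */
	uint64_t l2_offset;	/* offset the L1 hypervisor asked for L2 */
	int in_guest_mode;	/* 1 while L2 runs */
};

static uint64_t read_l1_tsc_offset(const struct vcpu_model *v)
{
	return v->in_guest_mode ? v->tsc_offset - v->l2_offset : v->tsc_offset;
}

int main(void)
{
	struct vcpu_model v = { .tsc_offset = 1000 };

	/* nested entry: fold L2's offset in */
	v.l2_offset = 250;
	v.tsc_offset += v.l2_offset;
	v.in_guest_mode = 1;
	printf("while L2 runs: active=%llu L1=%llu\n",
	       (unsigned long long)v.tsc_offset,
	       (unsigned long long)read_l1_tsc_offset(&v));

	/* nested exit: take it back out */
	v.tsc_offset -= v.l2_offset;
	v.in_guest_mode = 0;
	printf("after exit:    active=%llu L1=%llu\n",
	       (unsigned long long)v.tsc_offset,
	       (unsigned long long)read_l1_tsc_offset(&v));
	return 0;
}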
vcpu->arch.tsc_offset); + if (kvm_has_tsc_control) decache_tsc_multiplier(vmx); @@ -11222,7 +11214,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, } } else if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { - vmx_flush_tlb_ept_only(vcpu); + vmx_flush_tlb(vcpu, true); } /* @@ -11299,6 +11291,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; + if (nested_vmx_check_apic_access_controls(vcpu, vmcs12)) + return VMXERR_ENTRY_INVALID_CONTROL_FIELD; + if (nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; @@ -11420,6 +11415,7 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry) struct vmcs12 *vmcs12 = get_vmcs12(vcpu); u32 msr_entry_idx; u32 exit_qual; + int r; enter_guest_mode(vcpu); @@ -11429,26 +11425,21 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry) vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); vmx_segment_cache_clear(vmx); - if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual)) { - leave_guest_mode(vcpu); - vmx_switch_vmcs(vcpu, &vmx->vmcs01); - nested_vmx_entry_failure(vcpu, vmcs12, - EXIT_REASON_INVALID_STATE, exit_qual); - return 1; - } + if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) + vcpu->arch.tsc_offset += vmcs12->tsc_offset; + + r = EXIT_REASON_INVALID_STATE; + if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual)) + goto fail; nested_get_vmcs12_pages(vcpu, vmcs12); + r = EXIT_REASON_MSR_LOAD_FAIL; msr_entry_idx = nested_vmx_load_msr(vcpu, vmcs12->vm_entry_msr_load_addr, vmcs12->vm_entry_msr_load_count); - if (msr_entry_idx) { - leave_guest_mode(vcpu); - vmx_switch_vmcs(vcpu, &vmx->vmcs01); - nested_vmx_entry_failure(vcpu, vmcs12, - EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx); - return 1; - } + if (msr_entry_idx) + goto fail; /* * Note no nested_vmx_succeed or nested_vmx_fail here. At this point @@ -11457,6 +11448,14 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry) * the success flag) when L2 exits (see nested_vmx_vmexit()). 
*/ return 0; + +fail: + if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) + vcpu->arch.tsc_offset -= vmcs12->tsc_offset; + leave_guest_mode(vcpu); + vmx_switch_vmcs(vcpu, &vmx->vmcs01); + nested_vmx_entry_failure(vcpu, vmcs12, r, exit_qual); + return 1; } /* @@ -12028,6 +12027,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, leave_guest_mode(vcpu); + if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) + vcpu->arch.tsc_offset -= vmcs12->tsc_offset; + if (likely(!vmx->fail)) { if (exit_reason == -1) sync_vmcs12(vcpu, vmcs12); @@ -12065,7 +12067,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, } else if (!nested_cpu_has_ept(vmcs12) && nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { - vmx_flush_tlb_ept_only(vcpu); + vmx_flush_tlb(vcpu, true); } /* This is needed for same reason as it was needed in prepare_vmcs02 */ @@ -12224,10 +12226,16 @@ static inline int u64_shl_div_u64(u64 a, unsigned int shift, static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc) { - struct vcpu_vmx *vmx = to_vmx(vcpu); - u64 tscl = rdtsc(); - u64 guest_tscl = kvm_read_l1_tsc(vcpu, tscl); - u64 delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl; + struct vcpu_vmx *vmx; + u64 tscl, guest_tscl, delta_tsc; + + if (kvm_mwait_in_guest(vcpu->kvm)) + return -EOPNOTSUPP; + + vmx = to_vmx(vcpu); + tscl = rdtsc(); + guest_tscl = kvm_read_l1_tsc(vcpu, tscl); + delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl; /* Convert to host delta tsc if tsc scaling is enabled */ if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio && @@ -12533,7 +12541,7 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq, vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu)); vcpu_info.vector = irq.vector; - trace_kvm_pi_irte_update(vcpu->vcpu_id, host_irq, e->gsi, + trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi, vcpu_info.vector, vcpu_info.pi_desc_addr, set); if (set) @@ -12712,6 +12720,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, + .read_l1_tsc_offset = vmx_read_l1_tsc_offset, .write_tsc_offset = vmx_write_tsc_offset, .set_tdp_cr3 = vmx_set_cr3, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index b2ff74b12ec4..51ecd381793b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1490,7 +1490,7 @@ static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset) { - u64 curr_offset = vcpu->arch.tsc_offset; + u64 curr_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu); vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset; } @@ -1532,7 +1532,9 @@ static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) { - return vcpu->arch.tsc_offset + kvm_scale_tsc(vcpu, host_tsc); + u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu); + + return tsc_offset + kvm_scale_tsc(vcpu, host_tsc); } EXPORT_SYMBOL_GPL(kvm_read_l1_tsc); @@ -2362,6 +2364,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 1; vcpu->arch.smbase = data; break; + case MSR_IA32_TSC: + kvm_write_tsc(vcpu, msr_info); + break; case MSR_SMI_COUNT: if (!msr_info->host_initiated) return 1; @@ -2605,6 +2610,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_UCODE_REV: msr_info->data = vcpu->arch.microcode_version; break; + case MSR_IA32_TSC: 
+ msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset; + break; case MSR_MTRRcap: case 0x200 ... 0x2ff: return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); @@ -2819,7 +2827,8 @@ out: static inline bool kvm_can_mwait_in_guest(void) { return boot_cpu_has(X86_FEATURE_MWAIT) && - !boot_cpu_has_bug(X86_BUG_MONITOR); + !boot_cpu_has_bug(X86_BUG_MONITOR) && + boot_cpu_has(X86_FEATURE_ARAT); } int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 7d35ce672989..c9492f764902 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -302,13 +302,6 @@ static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec) __rem; \ }) -#define KVM_X86_DISABLE_EXITS_MWAIT (1 << 0) -#define KVM_X86_DISABLE_EXITS_HTL (1 << 1) -#define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2) -#define KVM_X86_DISABLE_VALID_EXITS (KVM_X86_DISABLE_EXITS_MWAIT | \ - KVM_X86_DISABLE_EXITS_HTL | \ - KVM_X86_DISABLE_EXITS_PAUSE) - static inline bool kvm_mwait_in_guest(struct kvm *kvm) { return kvm->arch.mwait_in_guest; diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index 62a7e9f65dec..cc7ff5957194 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -18,6 +18,7 @@ #include <linux/init.h> #include <linux/sched.h> #include <linux/seq_file.h> +#include <linux/highmem.h> #include <asm/pgtable.h> @@ -334,16 +335,16 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr, pgprotval_t eff_in, unsigned long P) { int i; - pte_t *start; + pte_t *pte; pgprotval_t prot, eff; - start = (pte_t *)pmd_page_vaddr(addr); for (i = 0; i < PTRS_PER_PTE; i++) { - prot = pte_flags(*start); - eff = effective_prot(eff_in, prot); st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT); + pte = pte_offset_map(&addr, st->current_address); + prot = pte_flags(*pte); + eff = effective_prot(eff_in, prot); note_page(m, st, __pgprot(prot), eff, 5); - start++; + pte_unmap(pte); } } #ifdef CONFIG_KASAN diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 0f3d50f4c48c..3bded76e8d5c 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -93,6 +93,18 @@ void arch_report_meminfo(struct seq_file *m) static inline void split_page_count(int level) { } #endif +static inline int +within(unsigned long addr, unsigned long start, unsigned long end) +{ + return addr >= start && addr < end; +} + +static inline int +within_inclusive(unsigned long addr, unsigned long start, unsigned long end) +{ + return addr >= start && addr <= end; +} + #ifdef CONFIG_X86_64 static inline unsigned long highmap_start_pfn(void) @@ -106,20 +118,25 @@ static inline unsigned long highmap_end_pfn(void) return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT; } -#endif - -static inline int -within(unsigned long addr, unsigned long start, unsigned long end) +static bool __cpa_pfn_in_highmap(unsigned long pfn) { - return addr >= start && addr < end; + /* + * Kernel text has an alias mapping at a high address, known + * here as "highmap". 
+ */ + return within_inclusive(pfn, highmap_start_pfn(), highmap_end_pfn()); } -static inline int -within_inclusive(unsigned long addr, unsigned long start, unsigned long end) +#else + +static bool __cpa_pfn_in_highmap(unsigned long pfn) { - return addr >= start && addr <= end; + /* There is no highmap on 32-bit */ + return false; } +#endif + /* * Flushing functions */ @@ -172,7 +189,7 @@ static void __cpa_flush_all(void *arg) static void cpa_flush_all(unsigned long cache) { - BUG_ON(irqs_disabled()); + BUG_ON(irqs_disabled() && !early_boot_irqs_disabled); on_each_cpu(__cpa_flush_all, (void *) cache, 1); } @@ -236,7 +253,7 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache, unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */ #endif - BUG_ON(irqs_disabled()); + BUG_ON(irqs_disabled() && !early_boot_irqs_disabled); on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1); @@ -1183,6 +1200,10 @@ static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr, cpa->numpages = 1; cpa->pfn = __pa(vaddr) >> PAGE_SHIFT; return 0; + + } else if (__cpa_pfn_in_highmap(cpa->pfn)) { + /* Faults in the highmap are OK, so do not warn: */ + return -EFAULT; } else { WARN(1, KERN_WARNING "CPA: called for zero pte. " "vaddr = %lx cpa->vaddr = %lx\n", vaddr, @@ -1335,8 +1356,7 @@ static int cpa_process_alias(struct cpa_data *cpa) * to touch the high mapped kernel as well: */ if (!within(vaddr, (unsigned long)_text, _brk_end) && - within_inclusive(cpa->pfn, highmap_start_pfn(), - highmap_end_pfn())) { + __cpa_pfn_in_highmap(cpa->pfn)) { unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base; alias_cpa = *cpa; diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index f1fd52f449e0..4d418e705878 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c @@ -421,6 +421,16 @@ static inline bool pti_kernel_image_global_ok(void) if (boot_cpu_has(X86_FEATURE_K8)) return false; + /* + * RANDSTRUCT derives its hardening benefits from the + * attacker's lack of knowledge about the layout of kernel + * data structures. Keep the kernel image non-global in + * cases where RANDSTRUCT is in use to help keep the layout a + * secret. + */ + if (IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT)) + return false; + return true; } @@ -430,12 +440,24 @@ static inline bool pti_kernel_image_global_ok(void) */ void pti_clone_kernel_text(void) { + /* + * rodata is part of the kernel image and is normally + * readable on the filesystem or on the web. But, do not + * clone the areas past rodata, they might contain secrets. + */ unsigned long start = PFN_ALIGN(_text); - unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE); + unsigned long end = (unsigned long)__end_rodata_hpage_align; if (!pti_kernel_image_global_ok()) return; + pr_debug("mapping partial kernel image into user address space\n"); + + /* + * Note that this will undo _some_ of the work that + * pti_set_kernel_image_nonglobal() did to clear the + * global bit. 
+ */ pti_clone_pmds(start, end, _PAGE_RW); } @@ -458,8 +480,6 @@ void pti_set_kernel_image_nonglobal(void) if (pti_kernel_image_global_ok()) return; - pr_debug("set kernel image non-global\n"); - set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT); } diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index b725154182cc..263c8453815e 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1027,7 +1027,17 @@ emit_cond_jmp: /* convert BPF opcode to x86 */ break; case BPF_JMP | BPF_JA: - jmp_offset = addrs[i + insn->off] - addrs[i]; + if (insn->off == -1) + /* -1 jmp instructions will always jump + * backwards two bytes. Explicitly handling + * this case avoids wasting too many passes + * when there are long sequences of replaced + * dead code. + */ + jmp_offset = -2; + else + jmp_offset = addrs[i + insn->off] - addrs[i]; + if (!jmp_offset) /* optimize out nop jumps */ break; @@ -1226,6 +1236,7 @@ skip_init_addrs: for (pass = 0; pass < 20 || image; pass++) { proglen = do_jit(prog, addrs, image, oldproglen, &ctx); if (proglen <= 0) { +out_image: image = NULL; if (header) bpf_jit_binary_free(header); @@ -1236,8 +1247,7 @@ skip_init_addrs: if (proglen != oldproglen) { pr_err("bpf_jit: proglen=%d != oldproglen=%d\n", proglen, oldproglen); - prog = orig_prog; - goto out_addrs; + goto out_image; } break; } @@ -1273,7 +1283,7 @@ skip_init_addrs: prog = orig_prog; } - if (!prog->is_func || extra_pass) { + if (!image || !prog->is_func || extra_pass) { out_addrs: kfree(addrs); kfree(jit_data); diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c index 48b14b534897..ccf4a49bb065 100644 --- a/arch/x86/power/hibernate_64.c +++ b/arch/x86/power/hibernate_64.c @@ -98,7 +98,7 @@ static int set_up_temporary_text_mapping(pgd_t *pgd) set_pgd(pgd + pgd_index(restore_jump_address), new_pgd); } else { /* No p4d for 4-level paging: point the pgd to the pud page table */ - pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot)); + pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot)); set_pgd(pgd + pgd_index(restore_jump_address), new_pgd); } diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c index 826898701045..19c1ff542387 100644 --- a/arch/x86/xen/enlighten_hvm.c +++ b/arch/x86/xen/enlighten_hvm.c @@ -65,6 +65,19 @@ static void __init xen_hvm_init_mem_mapping(void) { early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE); HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn)); + + /* + * The virtual address of the shared_info page has changed, so + * the vcpu_info pointer for VCPU 0 is now stale. + * + * The prepare_boot_cpu callback will re-initialize it via + * xen_vcpu_setup, but we can't rely on that to be called for + * old Xen versions (xen_have_vector_callback == 0). + * + * It is, in any case, bad to have a stale vcpu_info pointer + * so reset it now. + */ + xen_vcpu_info_reset(0); } static void __init init_hvm_pv_info(void) diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index c36d23aa6c35..357969a3697c 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -421,45 +421,33 @@ static void xen_load_gdt(const struct desc_ptr *dtr) { unsigned long va = dtr->address; unsigned int size = dtr->size + 1; - unsigned pages = DIV_ROUND_UP(size, PAGE_SIZE); - unsigned long frames[pages]; - int f; - - /* - * A GDT can be up to 64k in size, which corresponds to 8192 - * 8-byte entries, or 16 4k pages.. 
- */ + unsigned long pfn, mfn; + int level; + pte_t *ptep; + void *virt; - BUG_ON(size > 65536); + /* @size should be at most GDT_SIZE which is smaller than PAGE_SIZE. */ + BUG_ON(size > PAGE_SIZE); BUG_ON(va & ~PAGE_MASK); - for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) { - int level; - pte_t *ptep; - unsigned long pfn, mfn; - void *virt; - - /* - * The GDT is per-cpu and is in the percpu data area. - * That can be virtually mapped, so we need to do a - * page-walk to get the underlying MFN for the - * hypercall. The page can also be in the kernel's - * linear range, so we need to RO that mapping too. - */ - ptep = lookup_address(va, &level); - BUG_ON(ptep == NULL); - - pfn = pte_pfn(*ptep); - mfn = pfn_to_mfn(pfn); - virt = __va(PFN_PHYS(pfn)); + /* + * The GDT is per-cpu and is in the percpu data area. + * That can be virtually mapped, so we need to do a + * page-walk to get the underlying MFN for the + * hypercall. The page can also be in the kernel's + * linear range, so we need to RO that mapping too. + */ + ptep = lookup_address(va, &level); + BUG_ON(ptep == NULL); - frames[f] = mfn; + pfn = pte_pfn(*ptep); + mfn = pfn_to_mfn(pfn); + virt = __va(PFN_PHYS(pfn)); - make_lowmem_page_readonly((void *)va); - make_lowmem_page_readonly(virt); - } + make_lowmem_page_readonly((void *)va); + make_lowmem_page_readonly(virt); - if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct))) + if (HYPERVISOR_set_gdt(&mfn, size / sizeof(struct desc_struct))) BUG(); } @@ -470,34 +458,22 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr) { unsigned long va = dtr->address; unsigned int size = dtr->size + 1; - unsigned pages = DIV_ROUND_UP(size, PAGE_SIZE); - unsigned long frames[pages]; - int f; - - /* - * A GDT can be up to 64k in size, which corresponds to 8192 - * 8-byte entries, or 16 4k pages.. - */ + unsigned long pfn, mfn; + pte_t pte; - BUG_ON(size > 65536); + /* @size should be at most GDT_SIZE which is smaller than PAGE_SIZE. */ + BUG_ON(size > PAGE_SIZE); BUG_ON(va & ~PAGE_MASK); - for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) { - pte_t pte; - unsigned long pfn, mfn; + pfn = virt_to_pfn(va); + mfn = pfn_to_mfn(pfn); - pfn = virt_to_pfn(va); - mfn = pfn_to_mfn(pfn); + pte = pfn_pte(pfn, PAGE_KERNEL_RO); - pte = pfn_pte(pfn, PAGE_KERNEL_RO); - - if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0)) - BUG(); - - frames[f] = mfn; - } + if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0)) + BUG(); - if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct))) + if (HYPERVISOR_set_gdt(&mfn, size / sizeof(struct desc_struct))) BUG(); } diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index f0ecd98509d8..771ae9730ac6 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -4934,8 +4934,16 @@ static void bfq_prepare_request(struct request *rq, struct bio *bio) bool new_queue = false; bool bfqq_already_existing = false, split = false; - if (!rq->elv.icq) + /* + * Even if we don't have an icq attached, we should still clear + * the scheduler pointers, as they might point to previously + * allocated bic/bfqq structs. 
+ */ + if (!rq->elv.icq) { + rq->elv.priv[0] = rq->elv.priv[1] = NULL; return; + } + bic = icq_to_bic(rq->elv.icq); spin_lock_irq(&bfqd->lock); diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 1c16694ae145..eb85cb87c40f 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1177,26 +1177,20 @@ int blkcg_init_queue(struct request_queue *q) preloaded = !radix_tree_preload(GFP_KERNEL); - /* - * Make sure the root blkg exists and count the existing blkgs. As - * @q is bypassing at this point, blkg_lookup_create() can't be - * used. Open code insertion. - */ + /* Make sure the root blkg exists. */ rcu_read_lock(); spin_lock_irq(q->queue_lock); blkg = blkg_create(&blkcg_root, q, new_blkg); + if (IS_ERR(blkg)) + goto err_unlock; + q->root_blkg = blkg; + q->root_rl.blkg = blkg; spin_unlock_irq(q->queue_lock); rcu_read_unlock(); if (preloaded) radix_tree_preload_end(); - if (IS_ERR(blkg)) - return PTR_ERR(blkg); - - q->root_blkg = blkg; - q->root_rl.blkg = blkg; - ret = blk_throtl_init(q); if (ret) { spin_lock_irq(q->queue_lock); @@ -1204,6 +1198,13 @@ int blkcg_init_queue(struct request_queue *q) spin_unlock_irq(q->queue_lock); } return ret; + +err_unlock: + spin_unlock_irq(q->queue_lock); + rcu_read_unlock(); + if (preloaded) + radix_tree_preload_end(); + return PTR_ERR(blkg); } /** @@ -1410,9 +1411,6 @@ void blkcg_deactivate_policy(struct request_queue *q, __clear_bit(pol->plid, q->blkcg_pols); list_for_each_entry(blkg, &q->blkg_list, q_node) { - /* grab blkcg lock too while removing @pd from @blkg */ - spin_lock(&blkg->blkcg->lock); - if (blkg->pd[pol->plid]) { if (!blkg->pd[pol->plid]->offline && pol->pd_offline_fn) { @@ -1422,8 +1420,6 @@ void blkcg_deactivate_policy(struct request_queue *q, pol->pd_free_fn(blkg->pd[pol->plid]); blkg->pd[pol->plid] = NULL; } - - spin_unlock(&blkg->blkcg->lock); } spin_unlock_irq(q->queue_lock); diff --git a/block/blk-core.c b/block/blk-core.c index 806ce2442819..85909b431eb0 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -201,6 +201,10 @@ void blk_rq_init(struct request_queue *q, struct request *rq) rq->part = NULL; seqcount_init(&rq->gstate_seq); u64_stats_init(&rq->aborted_gstate_sync); + /* + * See comment of blk_mq_init_request + */ + WRITE_ONCE(rq->gstate, MQ_RQ_GEN_INC); } EXPORT_SYMBOL(blk_rq_init); @@ -915,7 +919,6 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags) while (true) { bool success = false; - int ret; rcu_read_lock(); if (percpu_ref_tryget_live(&q->q_usage_counter)) { @@ -947,14 +950,12 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags) */ smp_rmb(); - ret = wait_event_interruptible(q->mq_freeze_wq, - (atomic_read(&q->mq_freeze_depth) == 0 && - (preempt || !blk_queue_preempt_only(q))) || - blk_queue_dying(q)); + wait_event(q->mq_freeze_wq, + (atomic_read(&q->mq_freeze_depth) == 0 && + (preempt || !blk_queue_preempt_only(q))) || + blk_queue_dying(q)); if (blk_queue_dying(q)) return -ENODEV; - if (ret) - return ret; } } diff --git a/block/blk-mq.c b/block/blk-mq.c index 0dc9e341c2a7..9ce9cac16c3f 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -95,18 +95,15 @@ static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx, { struct mq_inflight *mi = priv; - if (blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) { - /* - * index[0] counts the specific partition that was asked - * for. index[1] counts the ones that are active on the - * whole device, so increment that if mi->part is indeed - * a partition, and not a whole device. 
- */ - if (rq->part == mi->part) - mi->inflight[0]++; - if (mi->part->partno) - mi->inflight[1]++; - } + /* + * index[0] counts the specific partition that was asked for. index[1] + * counts the ones that are active on the whole device, so increment + * that if mi->part is indeed a partition, and not a whole device. + */ + if (rq->part == mi->part) + mi->inflight[0]++; + if (mi->part->partno) + mi->inflight[1]++; } void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part, @@ -118,6 +115,25 @@ void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part, blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi); } +static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, + bool reserved) +{ + struct mq_inflight *mi = priv; + + if (rq->part == mi->part) + mi->inflight[rq_data_dir(rq)]++; +} + +void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part, + unsigned int inflight[2]) +{ + struct mq_inflight mi = { .part = part, .inflight = inflight, }; + + inflight[0] = inflight[1] = 0; + blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_rw, &mi); +} + void blk_freeze_queue_start(struct request_queue *q) { int freeze_depth; @@ -2042,6 +2058,13 @@ static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, seqcount_init(&rq->gstate_seq); u64_stats_init(&rq->aborted_gstate_sync); + /* + * start gstate with gen 1 instead of 0, otherwise it will be equal + * to aborted_gstate, and be identified timed out by + * blk_mq_terminate_expired. + */ + WRITE_ONCE(rq->gstate, MQ_RQ_GEN_INC); + return 0; } @@ -2329,7 +2352,7 @@ static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set, static void blk_mq_map_swqueue(struct request_queue *q) { - unsigned int i; + unsigned int i, hctx_idx; struct blk_mq_hw_ctx *hctx; struct blk_mq_ctx *ctx; struct blk_mq_tag_set *set = q->tag_set; @@ -2346,8 +2369,23 @@ static void blk_mq_map_swqueue(struct request_queue *q) /* * Map software to hardware queues. + * + * If the cpu isn't present, the cpu is mapped to first hctx. */ for_each_possible_cpu(i) { + hctx_idx = q->mq_map[i]; + /* unmapped hw queue can be remapped after CPU topo changed */ + if (!set->tags[hctx_idx] && + !__blk_mq_alloc_rq_map(set, hctx_idx)) { + /* + * If tags initialization fail for some hctx, + * that hctx won't be brought online. In this + * case, remap the current ctx to hctx[0] which + * is guaranteed to always have tags allocated + */ + q->mq_map[i] = 0; + } + ctx = per_cpu_ptr(q->queue_ctx, i); hctx = blk_mq_map_queue(q, i); @@ -2359,8 +2397,21 @@ static void blk_mq_map_swqueue(struct request_queue *q) mutex_unlock(&q->sysfs_lock); queue_for_each_hw_ctx(q, hctx, i) { - /* every hctx should get mapped by at least one CPU */ - WARN_ON(!hctx->nr_ctx); + /* + * If no software queues are mapped to this hardware queue, + * disable it and free the request entries. + */ + if (!hctx->nr_ctx) { + /* Never unmap queue 0. 
We need it as a + * fallback in case of a new remap fails + * allocation + */ + if (i && set->tags[i]) + blk_mq_free_map_and_requests(set, i); + + hctx->tags = NULL; + continue; + } hctx->tags = set->tags[i]; WARN_ON(!hctx->tags); diff --git a/block/blk-mq.h b/block/blk-mq.h index 88c558f71819..e1bb420dc5d6 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -7,6 +7,9 @@ struct blk_mq_tag_set; +/** + * struct blk_mq_ctx - State for a software queue facing the submitting CPUs + */ struct blk_mq_ctx { struct { spinlock_t lock; @@ -185,7 +188,9 @@ static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx) } void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part, - unsigned int inflight[2]); + unsigned int inflight[2]); +void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part, + unsigned int inflight[2]); static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx) { diff --git a/block/genhd.c b/block/genhd.c index dc7e089373b9..c4513fe1adda 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -82,6 +82,18 @@ void part_in_flight(struct request_queue *q, struct hd_struct *part, } } +void part_in_flight_rw(struct request_queue *q, struct hd_struct *part, + unsigned int inflight[2]) +{ + if (q->mq_ops) { + blk_mq_in_flight_rw(q, part, inflight); + return; + } + + inflight[0] = atomic_read(&part->in_flight[0]); + inflight[1] = atomic_read(&part->in_flight[1]); +} + struct hd_struct *__disk_get_part(struct gendisk *disk, int partno) { struct disk_part_tbl *ptbl = rcu_dereference(disk->part_tbl); diff --git a/block/partition-generic.c b/block/partition-generic.c index 08dabcd8b6ae..db57cced9b98 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c @@ -145,13 +145,15 @@ ssize_t part_stat_show(struct device *dev, jiffies_to_msecs(part_stat_read(p, time_in_queue))); } -ssize_t part_inflight_show(struct device *dev, - struct device_attribute *attr, char *buf) +ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct hd_struct *p = dev_to_part(dev); + struct request_queue *q = part_to_disk(p)->queue; + unsigned int inflight[2]; - return sprintf(buf, "%8u %8u\n", atomic_read(&p->in_flight[0]), - atomic_read(&p->in_flight[1])); + part_in_flight_rw(q, p, inflight); + return sprintf(buf, "%8u %8u\n", inflight[0], inflight[1]); } #ifdef CONFIG_FAIL_MAKE_REQUEST diff --git a/crypto/api.c b/crypto/api.c index 1d5290c67108..0ee632bba064 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -204,9 +204,14 @@ static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, down_read(&crypto_alg_sem); alg = __crypto_alg_lookup(name, type | test, mask | test); - if (!alg && test) - alg = __crypto_alg_lookup(name, type, mask) ? 
- ERR_PTR(-ELIBBAD) : NULL; + if (!alg && test) { + alg = __crypto_alg_lookup(name, type, mask); + if (alg && !crypto_is_larval(alg)) { + /* Test failed */ + crypto_mod_put(alg); + alg = ERR_PTR(-ELIBBAD); + } + } up_read(&crypto_alg_sem); return alg; diff --git a/crypto/drbg.c b/crypto/drbg.c index 4faa2781c964..466a112a4446 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1134,8 +1134,10 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg) if (!drbg) return; kzfree(drbg->Vbuf); + drbg->Vbuf = NULL; drbg->V = NULL; kzfree(drbg->Cbuf); + drbg->Cbuf = NULL; drbg->C = NULL; kzfree(drbg->scratchpadbuf); drbg->scratchpadbuf = NULL; diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index 76fb96966f7b..2f2e737be0f8 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -2123,6 +2123,25 @@ static int __init intel_opregion_present(void) return opregion; } +static bool dmi_is_desktop(void) +{ + const char *chassis_type; + + chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE); + if (!chassis_type) + return false; + + if (!strcmp(chassis_type, "3") || /* 3: Desktop */ + !strcmp(chassis_type, "4") || /* 4: Low Profile Desktop */ + !strcmp(chassis_type, "5") || /* 5: Pizza Box */ + !strcmp(chassis_type, "6") || /* 6: Mini Tower */ + !strcmp(chassis_type, "7") || /* 7: Tower */ + !strcmp(chassis_type, "11")) /* 11: Main Server Chassis */ + return true; + + return false; +} + int acpi_video_register(void) { int ret = 0; @@ -2143,8 +2162,12 @@ int acpi_video_register(void) * win8 ready (where we also prefer the native backlight driver, so * normally the acpi_video code should not register there anyways). */ - if (only_lcd == -1) - only_lcd = acpi_osi_is_win8(); + if (only_lcd == -1) { + if (dmi_is_desktop() && acpi_osi_is_win8()) + only_lcd = true; + else + only_lcd = false; + } dmi_check_system(video_dmi_table); diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c index ebb626ffb5fa..4bde16fb97d8 100644 --- a/drivers/acpi/acpi_watchdog.c +++ b/drivers/acpi/acpi_watchdog.c @@ -12,23 +12,64 @@ #define pr_fmt(fmt) "ACPI: watchdog: " fmt #include <linux/acpi.h> +#include <linux/dmi.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include "internal.h" +static const struct dmi_system_id acpi_watchdog_skip[] = { + { + /* + * On Lenovo Z50-70 there are two issues with the WDAT + * table. First some of the instructions use RTC SRAM + * to store persistent information. This does not work well + * with Linux RTC driver. Second, more important thing is + * that the instructions do not actually reset the system. + * + * On this particular system iTCO_wdt seems to work just + * fine so we prefer that over WDAT for now. + * + * See also https://bugzilla.kernel.org/show_bug.cgi?id=199033. 
+ */ + .ident = "Lenovo Z50-70", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20354"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Z50-70"), + }, + }, + {} +}; + +static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void) +{ + const struct acpi_table_wdat *wdat = NULL; + acpi_status status; + + if (acpi_disabled) + return NULL; + + if (dmi_check_system(acpi_watchdog_skip)) + return NULL; + + status = acpi_get_table(ACPI_SIG_WDAT, 0, + (struct acpi_table_header **)&wdat); + if (ACPI_FAILURE(status)) { + /* It is fine if there is no WDAT */ + return NULL; + } + + return wdat; +} + /** * Returns true if this system should prefer ACPI based watchdog instead of * the native one (which are typically the same hardware). */ bool acpi_has_watchdog(void) { - struct acpi_table_header hdr; - - if (acpi_disabled) - return false; - - return ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_WDAT, 0, &hdr)); + return !!acpi_watchdog_get_wdat(); } EXPORT_SYMBOL_GPL(acpi_has_watchdog); @@ -41,12 +82,10 @@ void __init acpi_watchdog_init(void) struct platform_device *pdev; struct resource *resources; size_t nresources = 0; - acpi_status status; int i; - status = acpi_get_table(ACPI_SIG_WDAT, 0, - (struct acpi_table_header **)&wdat); - if (ACPI_FAILURE(status)) { + wdat = acpi_watchdog_get_wdat(); + if (!wdat) { /* It is fine if there is no WDAT */ return; } diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index e1eee7a60fad..f1cc4f9d31cd 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -635,4 +635,26 @@ module_param_call(lid_init_state, NULL, 0644); MODULE_PARM_DESC(lid_init_state, "Behavior for reporting LID initial state"); -module_acpi_driver(acpi_button_driver); +static int acpi_button_register_driver(struct acpi_driver *driver) +{ + /* + * Modules such as nouveau.ko and i915.ko have a link time dependency + * on acpi_lid_open(), and would therefore not be loadable on ACPI + * capable kernels booted in non-ACPI mode if the return value of + * acpi_bus_register_driver() is returned from here with ACPI disabled + * when this driver is built as a module. + */ + if (acpi_disabled) + return 0; + + return acpi_bus_register_driver(driver); +} + +static void acpi_button_unregister_driver(struct acpi_driver *driver) +{ + if (!acpi_disabled) + acpi_bus_unregister_driver(driver); +} + +module_driver(acpi_button_driver, acpi_button_register_driver, + acpi_button_unregister_driver); diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index cc234e6a6297..970dd87d347c 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -2166,10 +2166,10 @@ int __init acpi_scan_init(void) acpi_cmos_rtc_init(); acpi_container_init(); acpi_memory_hotplug_init(); + acpi_watchdog_init(); acpi_pnp_init(); acpi_int340x_thermal_init(); acpi_amba_init(); - acpi_watchdog_init(); acpi_init_lpit(); acpi_scan_add_handler(&generic_device_handler); diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 99a1a650326d..974e58457697 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -364,6 +364,19 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"), }, }, + /* + * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using + * the Low Power S0 Idle firmware interface (see + * https://bugzilla.kernel.org/show_bug.cgi?id=199057). 
+ */ + { + .callback = init_no_lps0, + .ident = "ThinkPad X1 Tablet(2016)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"), + }, + }, {}, }; diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index 594c228d2f02..4a3ac31c07d0 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -69,11 +69,12 @@ static ssize_t driver_override_show(struct device *_dev, struct device_attribute *attr, char *buf) { struct amba_device *dev = to_amba_device(_dev); + ssize_t len; - if (!dev->driver_override) - return 0; - - return sprintf(buf, "%s\n", dev->driver_override); + device_lock(_dev); + len = sprintf(buf, "%s\n", dev->driver_override); + device_unlock(_dev); + return len; } static ssize_t driver_override_store(struct device *_dev, @@ -81,9 +82,10 @@ static ssize_t driver_override_store(struct device *_dev, const char *buf, size_t count) { struct amba_device *dev = to_amba_device(_dev); - char *driver_override, *old = dev->driver_override, *cp; + char *driver_override, *old, *cp; - if (count > PATH_MAX) + /* We need to keep extra room for a newline */ + if (count >= (PAGE_SIZE - 1)) return -EINVAL; driver_override = kstrndup(buf, count, GFP_KERNEL); @@ -94,12 +96,15 @@ static ssize_t driver_override_store(struct device *_dev, if (cp) *cp = '\0'; + device_lock(_dev); + old = dev->driver_override; if (strlen(driver_override)) { dev->driver_override = driver_override; } else { kfree(driver_override); dev->driver_override = NULL; } + device_unlock(_dev); kfree(old); diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 764b63a5aade..e578eee31589 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -2839,6 +2839,14 @@ static void binder_transaction(struct binder_proc *proc, else return_error = BR_DEAD_REPLY; mutex_unlock(&context->context_mgr_node_lock); + if (target_node && target_proc == proc) { + binder_user_error("%d:%d got transaction to context manager from process owning it\n", + proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; + goto err_invalid_target_handle; + } } if (!target_node) { /* diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 1ff17799769d..6389c88b3500 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -698,7 +698,7 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, DPRINTK("ENTER\n"); - ahci_stop_engine(ap); + hpriv->stop_engine(ap); rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context), deadline, &online, NULL); @@ -724,7 +724,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, bool online; int rc; - ahci_stop_engine(ap); + hpriv->stop_engine(ap); /* clear D2H reception area to properly wait for D2H FIS */ ata_tf_init(link->device, &tf); @@ -788,7 +788,7 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class, DPRINTK("ENTER\n"); - ahci_stop_engine(ap); + hpriv->stop_engine(ap); for (i = 0; i < 2; i++) { u16 val; diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index 4356ef1d28a8..824bd399f02e 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -350,7 +350,6 @@ struct ahci_host_priv { u32 em_msg_type; /* EM message type */ bool got_runtime_pm; /* Did we do pm_runtime_get? */ struct clk *clks[AHCI_MAX_CLKS]; /* Optional */ - struct reset_control *rsts; /* Optional */ struct regulator **target_pwrs; /* Optional */ /* * If platform uses PHYs. 
There is a 1:1 relation between the port number and @@ -366,6 +365,13 @@ struct ahci_host_priv { * be overridden anytime before the host is activated. */ void (*start_engine)(struct ata_port *ap); + /* + * Optional ahci_stop_engine override, if not set this gets set to the + * default ahci_stop_engine during ahci_save_initial_config, this can + * be overridden anytime before the host is activated. + */ + int (*stop_engine)(struct ata_port *ap); + irqreturn_t (*irq_handler)(int irq, void *dev_instance); /* only required for per-port MSI(-X) support */ diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c index de7128d81e9c..0045dacd814b 100644 --- a/drivers/ata/ahci_mvebu.c +++ b/drivers/ata/ahci_mvebu.c @@ -62,6 +62,60 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv) writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA); } +/** + * ahci_mvebu_stop_engine + * + * @ap: Target ata port + * + * Errata Ref#226 - SATA Disk HOT swap issue when connected through + * Port Multiplier in FIS-based Switching mode. + * + * To avoid the issue, according to design, the bits[11:8, 0] of + * register PxFBS are cleared when Port Command and Status (0x18) bit[0] + * changes its value from 1 to 0, i.e. falling edge of Port + * Command and Status bit[0] sends PULSE that resets PxFBS + * bits[11:8; 0]. + * + * This function is used to override function of "ahci_stop_engine" + * from libahci.c by adding the mvebu work around(WA) to save PxFBS + * value before the PxCMD ST write of 0, then restore PxFBS value. + * + * Return: 0 on success; Error code otherwise. + */ +int ahci_mvebu_stop_engine(struct ata_port *ap) +{ + void __iomem *port_mmio = ahci_port_base(ap); + u32 tmp, port_fbs; + + tmp = readl(port_mmio + PORT_CMD); + + /* check if the HBA is idle */ + if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0) + return 0; + + /* save the port PxFBS register for later restore */ + port_fbs = readl(port_mmio + PORT_FBS); + + /* setting HBA to idle */ + tmp &= ~PORT_CMD_START; + writel(tmp, port_mmio + PORT_CMD); + + /* + * bit #15 PxCMD signal doesn't clear PxFBS, + * restore the PxFBS register right after clearing the PxCMD ST, + * no need to wait for the PxCMD bit #15. + */ + writel(port_fbs, port_mmio + PORT_FBS); + + /* wait for engine to stop. 
This could be as long as 500 msec */ + tmp = ata_wait_register(ap, port_mmio + PORT_CMD, + PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500); + if (tmp & PORT_CMD_LIST_ON) + return -EIO; + + return 0; +} + #ifdef CONFIG_PM_SLEEP static int ahci_mvebu_suspend(struct platform_device *pdev, pm_message_t state) { @@ -112,6 +166,8 @@ static int ahci_mvebu_probe(struct platform_device *pdev) if (rc) return rc; + hpriv->stop_engine = ahci_mvebu_stop_engine; + if (of_device_is_compatible(pdev->dev.of_node, "marvell,armada-380-ahci")) { dram = mv_mbus_dram_info(); diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c index 2685f28160f7..cfdef4d44ae9 100644 --- a/drivers/ata/ahci_qoriq.c +++ b/drivers/ata/ahci_qoriq.c @@ -96,7 +96,7 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class, DPRINTK("ENTER\n"); - ahci_stop_engine(ap); + hpriv->stop_engine(ap); /* * There is a errata on ls1021a Rev1.0 and Rev2.0 which is: diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c index c2b5941d9184..ad58da7c9aff 100644 --- a/drivers/ata/ahci_xgene.c +++ b/drivers/ata/ahci_xgene.c @@ -165,7 +165,7 @@ static int xgene_ahci_restart_engine(struct ata_port *ap) PORT_CMD_ISSUE, 0x0, 1, 100)) return -EBUSY; - ahci_stop_engine(ap); + hpriv->stop_engine(ap); ahci_start_fis_rx(ap); /* @@ -421,7 +421,7 @@ static int xgene_ahci_hardreset(struct ata_link *link, unsigned int *class, portrxfis_saved = readl(port_mmio + PORT_FIS_ADDR); portrxfishi_saved = readl(port_mmio + PORT_FIS_ADDR_HI); - ahci_stop_engine(ap); + hpriv->stop_engine(ap); rc = xgene_ahci_do_hardreset(link, deadline, &online); diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 7adcf3caabd0..e5d90977caec 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -560,6 +560,9 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv) if (!hpriv->start_engine) hpriv->start_engine = ahci_start_engine; + if (!hpriv->stop_engine) + hpriv->stop_engine = ahci_stop_engine; + if (!hpriv->irq_handler) hpriv->irq_handler = ahci_single_level_irq_intr; } @@ -897,9 +900,10 @@ static void ahci_start_port(struct ata_port *ap) static int ahci_deinit_port(struct ata_port *ap, const char **emsg) { int rc; + struct ahci_host_priv *hpriv = ap->host->private_data; /* disable DMA */ - rc = ahci_stop_engine(ap); + rc = hpriv->stop_engine(ap); if (rc) { *emsg = "failed to stop engine"; return rc; @@ -1310,7 +1314,7 @@ int ahci_kick_engine(struct ata_port *ap) int busy, rc; /* stop engine */ - rc = ahci_stop_engine(ap); + rc = hpriv->stop_engine(ap); if (rc) goto out_restart; @@ -1549,7 +1553,7 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class, DPRINTK("ENTER\n"); - ahci_stop_engine(ap); + hpriv->stop_engine(ap); /* clear D2H reception area to properly wait for D2H FIS */ ata_tf_init(link->device, &tf); @@ -2075,14 +2079,14 @@ void ahci_error_handler(struct ata_port *ap) if (!(ap->pflags & ATA_PFLAG_FROZEN)) { /* restart engine */ - ahci_stop_engine(ap); + hpriv->stop_engine(ap); hpriv->start_engine(ap); } sata_pmp_error_handler(ap); if (!ata_dev_enabled(ap->link.device)) - ahci_stop_engine(ap); + hpriv->stop_engine(ap); } EXPORT_SYMBOL_GPL(ahci_error_handler); @@ -2129,7 +2133,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep) return; /* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */ - rc = ahci_stop_engine(ap); + rc = hpriv->stop_engine(ap); if (rc) return; @@ -2189,7 +2193,7 @@ static void ahci_enable_fbs(struct ata_port *ap) 
return; } - rc = ahci_stop_engine(ap); + rc = hpriv->stop_engine(ap); if (rc) return; @@ -2222,7 +2226,7 @@ static void ahci_disable_fbs(struct ata_port *ap) return; } - rc = ahci_stop_engine(ap); + rc = hpriv->stop_engine(ap); if (rc) return; diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index 46a762442dc5..30cc8f1a31e1 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c @@ -25,7 +25,6 @@ #include <linux/phy/phy.h> #include <linux/pm_runtime.h> #include <linux/of_platform.h> -#include <linux/reset.h> #include "ahci.h" static void ahci_host_stop(struct ata_host *host); @@ -196,8 +195,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_disable_regulators); * following order: * 1) Regulator * 2) Clocks (through ahci_platform_enable_clks) - * 3) Resets - * 4) Phys + * 3) Phys * * If resource enabling fails at any point the previous enabled resources * are disabled in reverse order. @@ -217,19 +215,12 @@ int ahci_platform_enable_resources(struct ahci_host_priv *hpriv) if (rc) goto disable_regulator; - rc = reset_control_deassert(hpriv->rsts); - if (rc) - goto disable_clks; - rc = ahci_platform_enable_phys(hpriv); if (rc) - goto disable_resets; + goto disable_clks; return 0; -disable_resets: - reset_control_assert(hpriv->rsts); - disable_clks: ahci_platform_disable_clks(hpriv); @@ -248,15 +239,12 @@ EXPORT_SYMBOL_GPL(ahci_platform_enable_resources); * following order: * 1) Phys * 2) Clocks (through ahci_platform_disable_clks) - * 3) Resets - * 4) Regulator + * 3) Regulator */ void ahci_platform_disable_resources(struct ahci_host_priv *hpriv) { ahci_platform_disable_phys(hpriv); - reset_control_assert(hpriv->rsts); - ahci_platform_disable_clks(hpriv); ahci_platform_disable_regulators(hpriv); @@ -405,12 +393,6 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev) hpriv->clks[i] = clk; } - hpriv->rsts = devm_reset_control_array_get_optional_shared(dev); - if (IS_ERR(hpriv->rsts)) { - rc = PTR_ERR(hpriv->rsts); - goto err_out; - } - hpriv->nports = child_nodes = of_get_child_count(dev->of_node); /* diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 8bc71ca61e7f..68596bd4cf06 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4549,6 +4549,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { ATA_HORKAGE_ZERO_AFTER_TRIM | ATA_HORKAGE_NOLPM, }, + /* This specific Samsung model/firmware-rev does not handle LPM well */ + { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, }, + + /* Sandisk devices which are known to not handle LPM well */ + { "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, }, + /* devices that don't properly handle queued TRIM commands */ { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index c016829a38fd..513b260bcff1 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -175,8 +175,8 @@ static void ata_eh_handle_port_resume(struct ata_port *ap) { } #endif /* CONFIG_PM */ -static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt, - va_list args) +static __printf(2, 0) void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, + const char *fmt, va_list args) { ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len, ATA_EH_DESC_LEN - ehi->desc_len, diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c index aafb8cc03523..e67815b896fc 100644 --- a/drivers/ata/sata_highbank.c +++ 
b/drivers/ata/sata_highbank.c @@ -410,7 +410,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class, int rc; int retry = 100; - ahci_stop_engine(ap); + hpriv->stop_engine(ap); /* clear D2H reception area to properly wait for D2H FIS */ ata_tf_init(link->device, &tf); diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c index 4b1995e2d044..010ca101d412 100644 --- a/drivers/ata/sata_sil24.c +++ b/drivers/ata/sata_sil24.c @@ -285,13 +285,13 @@ static const struct sil24_cerr_info { [PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_RESET, "protocol mismatch" }, [PORT_CERR_DIRECTION] = { AC_ERR_HSM, ATA_EH_RESET, - "data directon mismatch" }, + "data direction mismatch" }, [PORT_CERR_UNDERRUN] = { AC_ERR_HSM, ATA_EH_RESET, "ran out of SGEs while writing" }, [PORT_CERR_OVERRUN] = { AC_ERR_HSM, ATA_EH_RESET, "ran out of SGEs while reading" }, [PORT_CERR_PKT_PROT] = { AC_ERR_HSM, ATA_EH_RESET, - "invalid data directon for ATAPI CDB" }, + "invalid data direction for ATAPI CDB" }, [PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET, "SGT not on qword boundary" }, [PORT_CERR_SGT_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET, diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index d97c05690faa..4e46dc9e41ad 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -191,7 +191,7 @@ static char *res_strings[] = { "reserved 37", "reserved 38", "reserved 39", - "reseverd 40", + "reserved 40", "reserved 41", "reserved 42", "reserved 43", diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index 44abb8a0a5e5..be076606d30e 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -671,7 +671,7 @@ static void ia_tx_poll (IADEV *iadev) { if ((vcc->pop) && (skb1->len != 0)) { vcc->pop(vcc, skb1); - IF_EVENT(printk("Tansmit Done - skb 0x%lx return\n", + IF_EVENT(printk("Transmit Done - skb 0x%lx return\n", (long)skb1);) } else @@ -1665,7 +1665,7 @@ static void tx_intr(struct atm_dev *dev) status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG); if (status & TRANSMIT_DONE){ - IF_EVENT(printk("Tansmit Done Intr logic run\n");) + IF_EVENT(printk("Transmit Done Intr logic run\n");) spin_lock_irqsave(&iadev->tx_lock, flags); ia_tx_poll(iadev); spin_unlock_irqrestore(&iadev->tx_lock, flags); diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c index 1ef67db03c8e..9c9a22958717 100644 --- a/drivers/atm/zatm.c +++ b/drivers/atm/zatm.c @@ -28,6 +28,7 @@ #include <asm/io.h> #include <linux/atomic.h> #include <linux/uaccess.h> +#include <linux/nospec.h> #include "uPD98401.h" #include "uPD98402.h" @@ -1458,6 +1459,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) return -EFAULT; if (pool < 0 || pool > ZATM_LAST_POOL) return -EINVAL; + pool = array_index_nospec(pool, + ZATM_LAST_POOL + 1); spin_lock_irqsave(&zatm_dev->lock, flags); info = zatm_dev->pool_info[pool]; if (cmd == ZATM_GETPOOLZ) { diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c index 1e6396bb807b..597d40893862 100644 --- a/drivers/base/dma-coherent.c +++ b/drivers/base/dma-coherent.c @@ -312,8 +312,9 @@ static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem, * This checks whether the memory was allocated from the per-device * coherent memory pool and if so, maps that memory to the provided vma. * - * Returns 1 if we correctly mapped the memory, or 0 if the caller should - * proceed with mapping memory from generic pools. 
+ * Returns 1 if @vaddr belongs to the device coherent pool and the caller + * should return @ret, or 0 if they should proceed with mapping memory from + * generic areas. */ int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma, void *vaddr, size_t size, int *ret) diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index 3b118353ea17..d82566d6e237 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c @@ -226,7 +226,6 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, #ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP unsigned long user_count = vma_pages(vma); unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; - unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr)); unsigned long off = vma->vm_pgoff; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); @@ -234,12 +233,11 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) return ret; - if (off < count && user_count <= (count - off)) { + if (off < count && user_count <= (count - off)) ret = remap_pfn_range(vma, vma->vm_start, - pfn + off, + page_to_pfn(virt_to_page(cpu_addr)) + off, user_count << PAGE_SHIFT, vma->vm_page_prot); - } #endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */ return ret; diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c index 31b5015b59fe..358354148dec 100644 --- a/drivers/base/firmware_loader/fallback.c +++ b/drivers/base/firmware_loader/fallback.c @@ -537,8 +537,8 @@ exit: } /** - * fw_load_sysfs_fallback - load a firmware via the syfs fallback mechanism - * @fw_sysfs: firmware syfs information for the firmware to load + * fw_load_sysfs_fallback - load a firmware via the sysfs fallback mechanism + * @fw_sysfs: firmware sysfs information for the firmware to load * @opt_flags: flags of options, FW_OPT_* * @timeout: timeout to wait for the load * diff --git a/drivers/base/firmware_loader/fallback.h b/drivers/base/firmware_loader/fallback.h index dfebc644ed35..f8255670a663 100644 --- a/drivers/base/firmware_loader/fallback.h +++ b/drivers/base/firmware_loader/fallback.h @@ -6,7 +6,7 @@ #include <linux/device.h> /** - * struct firmware_fallback_config - firmware fallback configuratioon settings + * struct firmware_fallback_config - firmware fallback configuration settings * * Helps describe and fine tune the fallback mechanism. * diff --git a/drivers/block/loop.c b/drivers/block/loop.c index c9d04497a415..5d4e31655d96 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -451,25 +451,47 @@ static int lo_req_flush(struct loop_device *lo, struct request *rq) static void lo_complete_rq(struct request *rq) { struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); + blk_status_t ret = BLK_STS_OK; - if (unlikely(req_op(cmd->rq) == REQ_OP_READ && cmd->use_aio && - cmd->ret >= 0 && cmd->ret < blk_rq_bytes(cmd->rq))) { - struct bio *bio = cmd->rq->bio; - - bio_advance(bio, cmd->ret); - zero_fill_bio(bio); + if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) || + req_op(rq) != REQ_OP_READ) { + if (cmd->ret < 0) + ret = BLK_STS_IOERR; + goto end_io; } - blk_mq_end_request(rq, cmd->ret < 0 ? BLK_STS_IOERR : BLK_STS_OK); + /* + * Short READ - if we got some data, advance our request and + * retry it. If we got no data, end the rest with EIO. 
+ */ + if (cmd->ret) { + blk_update_request(rq, BLK_STS_OK, cmd->ret); + cmd->ret = 0; + blk_mq_requeue_request(rq, true); + } else { + if (cmd->use_aio) { + struct bio *bio = rq->bio; + + while (bio) { + zero_fill_bio(bio); + bio = bio->bi_next; + } + } + ret = BLK_STS_IOERR; +end_io: + blk_mq_end_request(rq, ret); + } } static void lo_rw_aio_do_completion(struct loop_cmd *cmd) { + struct request *rq = blk_mq_rq_from_pdu(cmd); + if (!atomic_dec_and_test(&cmd->ref)) return; kfree(cmd->bvec); cmd->bvec = NULL; - blk_mq_complete_request(cmd->rq); + blk_mq_complete_request(rq); } static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2) @@ -487,7 +509,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, { struct iov_iter iter; struct bio_vec *bvec; - struct request *rq = cmd->rq; + struct request *rq = blk_mq_rq_from_pdu(cmd); struct bio *bio = rq->bio; struct file *file = lo->lo_backing_file; unsigned int offset; @@ -1702,15 +1724,16 @@ EXPORT_SYMBOL(loop_unregister_transfer); static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { - struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); - struct loop_device *lo = cmd->rq->q->queuedata; + struct request *rq = bd->rq; + struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); + struct loop_device *lo = rq->q->queuedata; - blk_mq_start_request(bd->rq); + blk_mq_start_request(rq); if (lo->lo_state != Lo_bound) return BLK_STS_IOERR; - switch (req_op(cmd->rq)) { + switch (req_op(rq)) { case REQ_OP_FLUSH: case REQ_OP_DISCARD: case REQ_OP_WRITE_ZEROES: @@ -1723,8 +1746,8 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx, /* always use the first bio's css */ #ifdef CONFIG_BLK_CGROUP - if (cmd->use_aio && cmd->rq->bio && cmd->rq->bio->bi_css) { - cmd->css = cmd->rq->bio->bi_css; + if (cmd->use_aio && rq->bio && rq->bio->bi_css) { + cmd->css = rq->bio->bi_css; css_get(cmd->css); } else #endif @@ -1736,8 +1759,9 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx, static void loop_handle_cmd(struct loop_cmd *cmd) { - const bool write = op_is_write(req_op(cmd->rq)); - struct loop_device *lo = cmd->rq->q->queuedata; + struct request *rq = blk_mq_rq_from_pdu(cmd); + const bool write = op_is_write(req_op(rq)); + struct loop_device *lo = rq->q->queuedata; int ret = 0; if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) { @@ -1745,12 +1769,12 @@ static void loop_handle_cmd(struct loop_cmd *cmd) goto failed; } - ret = do_req_filebacked(lo, cmd->rq); + ret = do_req_filebacked(lo, rq); failed: /* complete non-aio request */ if (!cmd->use_aio || ret) { cmd->ret = ret ? 
-EIO : 0; - blk_mq_complete_request(cmd->rq); + blk_mq_complete_request(rq); } } @@ -1767,9 +1791,7 @@ static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq, { struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); - cmd->rq = rq; kthread_init_work(&cmd->work, loop_queue_work); - return 0; } diff --git a/drivers/block/loop.h b/drivers/block/loop.h index 0f45416e4fcf..b78de9879f4f 100644 --- a/drivers/block/loop.h +++ b/drivers/block/loop.h @@ -66,7 +66,6 @@ struct loop_device { struct loop_cmd { struct kthread_work work; - struct request *rq; bool use_aio; /* use AIO interface to handle I/O */ atomic_t ref; /* only for aio */ long ret; diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 07dc5419bd63..33b36fea1d73 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -732,6 +732,7 @@ static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts) */ enum { Opt_queue_depth, + Opt_lock_timeout, Opt_last_int, /* int args above */ Opt_last_string, @@ -740,11 +741,13 @@ enum { Opt_read_write, Opt_lock_on_read, Opt_exclusive, + Opt_notrim, Opt_err }; static match_table_t rbd_opts_tokens = { {Opt_queue_depth, "queue_depth=%d"}, + {Opt_lock_timeout, "lock_timeout=%d"}, /* int args above */ /* string args above */ {Opt_read_only, "read_only"}, @@ -753,20 +756,25 @@ static match_table_t rbd_opts_tokens = { {Opt_read_write, "rw"}, /* Alternate spelling */ {Opt_lock_on_read, "lock_on_read"}, {Opt_exclusive, "exclusive"}, + {Opt_notrim, "notrim"}, {Opt_err, NULL} }; struct rbd_options { int queue_depth; + unsigned long lock_timeout; bool read_only; bool lock_on_read; bool exclusive; + bool trim; }; #define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ +#define RBD_LOCK_TIMEOUT_DEFAULT 0 /* no timeout */ #define RBD_READ_ONLY_DEFAULT false #define RBD_LOCK_ON_READ_DEFAULT false #define RBD_EXCLUSIVE_DEFAULT false +#define RBD_TRIM_DEFAULT true static int parse_rbd_opts_token(char *c, void *private) { @@ -796,6 +804,14 @@ static int parse_rbd_opts_token(char *c, void *private) } rbd_opts->queue_depth = intval; break; + case Opt_lock_timeout: + /* 0 is "wait forever" (i.e. 
infinite timeout) */ + if (intval < 0 || intval > INT_MAX / 1000) { + pr_err("lock_timeout out of range\n"); + return -EINVAL; + } + rbd_opts->lock_timeout = msecs_to_jiffies(intval * 1000); + break; case Opt_read_only: rbd_opts->read_only = true; break; @@ -808,6 +824,9 @@ static int parse_rbd_opts_token(char *c, void *private) case Opt_exclusive: rbd_opts->exclusive = true; break; + case Opt_notrim: + rbd_opts->trim = false; + break; default: /* libceph prints "bad option" msg */ return -EINVAL; @@ -1392,7 +1411,7 @@ static bool rbd_img_is_write(struct rbd_img_request *img_req) case OBJ_OP_DISCARD: return true; default: - rbd_assert(0); + BUG(); } } @@ -2347,7 +2366,9 @@ static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes) osd_req_op_cls_init(obj_req->osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup"); osd_req_op_cls_request_data_bvecs(obj_req->osd_req, 0, - obj_req->copyup_bvecs, bytes); + obj_req->copyup_bvecs, + obj_req->copyup_bvec_count, + bytes); switch (obj_req->img_request->op_type) { case OBJ_OP_WRITE: @@ -2466,7 +2487,7 @@ again: } return false; default: - rbd_assert(0); + BUG(); } } @@ -2494,7 +2515,7 @@ static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req) } return false; default: - rbd_assert(0); + BUG(); } } @@ -3533,9 +3554,22 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, /* * lock_rwsem must be held for read */ -static void rbd_wait_state_locked(struct rbd_device *rbd_dev) +static int rbd_wait_state_locked(struct rbd_device *rbd_dev, bool may_acquire) { DEFINE_WAIT(wait); + unsigned long timeout; + int ret = 0; + + if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) + return -EBLACKLISTED; + + if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) + return 0; + + if (!may_acquire) { + rbd_warn(rbd_dev, "exclusive lock required"); + return -EROFS; + } do { /* @@ -3547,12 +3581,22 @@ static void rbd_wait_state_locked(struct rbd_device *rbd_dev) prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait, TASK_UNINTERRUPTIBLE); up_read(&rbd_dev->lock_rwsem); - schedule(); + timeout = schedule_timeout(ceph_timeout_jiffies( + rbd_dev->opts->lock_timeout)); down_read(&rbd_dev->lock_rwsem); - } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED && - !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)); + if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { + ret = -EBLACKLISTED; + break; + } + if (!timeout) { + rbd_warn(rbd_dev, "timed out waiting for lock"); + ret = -ETIMEDOUT; + break; + } + } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED); finish_wait(&rbd_dev->lock_waitq, &wait); + return ret; } static void rbd_queue_workfn(struct work_struct *work) @@ -3638,19 +3682,10 @@ static void rbd_queue_workfn(struct work_struct *work) (op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read); if (must_be_locked) { down_read(&rbd_dev->lock_rwsem); - if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED && - !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { - if (rbd_dev->opts->exclusive) { - rbd_warn(rbd_dev, "exclusive lock required"); - result = -EROFS; - goto err_unlock; - } - rbd_wait_state_locked(rbd_dev); - } - if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { - result = -EBLACKLISTED; + result = rbd_wait_state_locked(rbd_dev, + !rbd_dev->opts->exclusive); + if (result) goto err_unlock; - } } img_request = rbd_img_request_create(rbd_dev, op_type, snapc); @@ -3902,7 +3937,8 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) { struct gendisk *disk; struct request_queue *q; - u64 segment_size; + unsigned int 
objset_bytes = + rbd_dev->layout.object_size * rbd_dev->layout.stripe_count; int err; /* create gendisk info */ @@ -3942,20 +3978,19 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) blk_queue_flag_set(QUEUE_FLAG_NONROT, q); /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */ - /* set io sizes to object size */ - segment_size = rbd_obj_bytes(&rbd_dev->header); - blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE); + blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT); q->limits.max_sectors = queue_max_hw_sectors(q); blk_queue_max_segments(q, USHRT_MAX); blk_queue_max_segment_size(q, UINT_MAX); - blk_queue_io_min(q, segment_size); - blk_queue_io_opt(q, segment_size); + blk_queue_io_min(q, objset_bytes); + blk_queue_io_opt(q, objset_bytes); - /* enable the discard support */ - blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); - q->limits.discard_granularity = segment_size; - blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE); - blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE); + if (rbd_dev->opts->trim) { + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); + q->limits.discard_granularity = objset_bytes; + blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT); + blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT); + } if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; @@ -5179,8 +5214,10 @@ static int rbd_add_parse_args(const char *buf, rbd_opts->read_only = RBD_READ_ONLY_DEFAULT; rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT; + rbd_opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT; rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT; rbd_opts->exclusive = RBD_EXCLUSIVE_DEFAULT; + rbd_opts->trim = RBD_TRIM_DEFAULT; copts = ceph_parse_options(options, mon_addrs, mon_addrs + mon_addrs_size - 1, @@ -5216,6 +5253,8 @@ static void rbd_dev_image_unlock(struct rbd_device *rbd_dev) static int rbd_add_acquire_lock(struct rbd_device *rbd_dev) { + int ret; + if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) { rbd_warn(rbd_dev, "exclusive-lock feature is not enabled"); return -EINVAL; @@ -5223,9 +5262,9 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev) /* FIXME: "rbd map --exclusive" should be in interruptible */ down_read(&rbd_dev->lock_rwsem); - rbd_wait_state_locked(rbd_dev); + ret = rbd_wait_state_locked(rbd_dev, true); up_read(&rbd_dev->lock_rwsem); - if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { + if (ret) { rbd_warn(rbd_dev, "failed to acquire exclusive lock"); return -EROFS; } diff --git a/drivers/block/swim.c b/drivers/block/swim.c index 64e066eba72e..0e31884a9519 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c @@ -110,7 +110,7 @@ struct iwm { /* Select values for swim_select and swim_readbit */ #define READ_DATA_0 0x074 -#define TWOMEG_DRIVE 0x075 +#define ONEMEG_DRIVE 0x075 #define SINGLE_SIDED 0x076 #define DRIVE_PRESENT 0x077 #define DISK_IN 0x170 @@ -118,9 +118,9 @@ struct iwm { #define TRACK_ZERO 0x172 #define TACHO 0x173 #define READ_DATA_1 0x174 -#define MFM_MODE 0x175 +#define GCR_MODE 0x175 #define SEEK_COMPLETE 0x176 -#define ONEMEG_MEDIA 0x177 +#define TWOMEG_MEDIA 0x177 /* Bits in handshake register */ @@ -612,7 +612,6 @@ static void setup_medium(struct floppy_state *fs) struct floppy_struct *g; fs->disk_in = 1; fs->write_protected = swim_readbit(base, WRITE_PROT); - fs->type = swim_readbit(base, ONEMEG_MEDIA); if (swim_track00(base)) printk(KERN_ERR @@ -620,6 +619,9 @@ static void setup_medium(struct floppy_state 
*fs) swim_track00(base); + fs->type = swim_readbit(base, TWOMEG_MEDIA) ? + HD_MEDIA : DD_MEDIA; + fs->head_number = swim_readbit(base, SINGLE_SIDED) ? 1 : 2; get_floppy_geometry(fs, 0, &g); fs->total_secs = g->size; fs->secpercyl = g->head * g->sect; @@ -646,7 +648,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) swim_write(base, setup, S_IBM_DRIVE | S_FCLK_DIV2); udelay(10); - swim_drive(base, INTERNAL_DRIVE); + swim_drive(base, fs->location); swim_motor(base, ON); swim_action(base, SETMFM); if (fs->ejected) @@ -656,6 +658,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) goto out; } + set_capacity(fs->disk, fs->total_secs); + if (mode & FMODE_NDELAY) return 0; @@ -727,14 +731,9 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode, if (copy_to_user((void __user *) param, (void *) &floppy_type, sizeof(struct floppy_struct))) return -EFAULT; - break; - - default: - printk(KERN_DEBUG "SWIM floppy_ioctl: unknown cmd %d\n", - cmd); - return -ENOSYS; + return 0; } - return 0; + return -ENOTTY; } static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo) @@ -795,7 +794,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data) struct swim_priv *swd = data; int drive = (*part & 3); - if (drive > swd->floppy_count) + if (drive >= swd->floppy_count) return NULL; *part = 0; @@ -813,10 +812,9 @@ static int swim_add_floppy(struct swim_priv *swd, enum drive_location location) swim_motor(base, OFF); - if (swim_readbit(base, SINGLE_SIDED)) - fs->head_number = 1; - else - fs->head_number = 2; + fs->type = HD_MEDIA; + fs->head_number = 2; + fs->ref_count = 0; fs->ejected = 1; @@ -834,10 +832,12 @@ static int swim_floppy_init(struct swim_priv *swd) /* scan floppy drives */ swim_drive(base, INTERNAL_DRIVE); - if (swim_readbit(base, DRIVE_PRESENT)) + if (swim_readbit(base, DRIVE_PRESENT) && + !swim_readbit(base, ONEMEG_DRIVE)) swim_add_floppy(swd, INTERNAL_DRIVE); swim_drive(base, EXTERNAL_DRIVE); - if (swim_readbit(base, DRIVE_PRESENT)) + if (swim_readbit(base, DRIVE_PRESENT) && + !swim_readbit(base, ONEMEG_DRIVE)) swim_add_floppy(swd, EXTERNAL_DRIVE); /* register floppy drives */ @@ -861,7 +861,6 @@ static int swim_floppy_init(struct swim_priv *swd) &swd->lock); if (!swd->unit[drive].disk->queue) { err = -ENOMEM; - put_disk(swd->unit[drive].disk); goto exit_put_disks; } blk_queue_bounce_limit(swd->unit[drive].disk->queue, @@ -911,7 +910,7 @@ static int swim_probe(struct platform_device *dev) goto out; } - swim_base = ioremap(res->start, resource_size(res)); + swim_base = (struct swim __iomem *)res->start; if (!swim_base) { ret = -ENOMEM; goto out_release_io; @@ -923,7 +922,7 @@ static int swim_probe(struct platform_device *dev) if (!get_swim_mode(swim_base)) { printk(KERN_INFO "SWIM device not found !\n"); ret = -ENODEV; - goto out_iounmap; + goto out_release_io; } /* set platform driver data */ @@ -931,7 +930,7 @@ static int swim_probe(struct platform_device *dev) swd = kzalloc(sizeof(struct swim_priv), GFP_KERNEL); if (!swd) { ret = -ENOMEM; - goto out_iounmap; + goto out_release_io; } platform_set_drvdata(dev, swd); @@ -945,8 +944,6 @@ static int swim_probe(struct platform_device *dev) out_kfree: kfree(swd); -out_iounmap: - iounmap(swim_base); out_release_io: release_mem_region(res->start, resource_size(res)); out: @@ -974,8 +971,6 @@ static int swim_remove(struct platform_device *dev) for (drive = 0; drive < swd->floppy_count; drive++) floppy_eject(&swd->unit[drive]); - iounmap(swd->base); - res = 
platform_get_resource(dev, IORESOURCE_MEM, 0); if (res) release_mem_region(res->start, resource_size(res)); diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index af51015d056e..469541c1e51e 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -148,7 +148,7 @@ struct swim3 { #define MOTOR_ON 2 #define RELAX 3 /* also eject in progress */ #define READ_DATA_0 4 -#define TWOMEG_DRIVE 5 +#define ONEMEG_DRIVE 5 #define SINGLE_SIDED 6 /* drive or diskette is 4MB type? */ #define DRIVE_PRESENT 7 #define DISK_IN 8 @@ -156,9 +156,9 @@ struct swim3 { #define TRACK_ZERO 10 #define TACHO 11 #define READ_DATA_1 12 -#define MFM_MODE 13 +#define GCR_MODE 13 #define SEEK_COMPLETE 14 -#define ONEMEG_MEDIA 15 +#define TWOMEG_MEDIA 15 /* Definitions of values used in writing and formatting */ #define DATA_ESCAPE 0x99 diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index c8c8b0b8d333..b937cc1e2c07 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -231,6 +231,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 }, @@ -263,7 +264,6 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 }, /* QCA ROME chipset */ - { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME }, @@ -399,6 +399,13 @@ static const struct dmi_system_id btusb_needs_reset_resume_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 3060"), }, }, + { + /* Dell XPS 9360 (QCA ROME device 0cf3:e300) */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"), + }, + }, {} }; @@ -2852,6 +2859,12 @@ static int btusb_config_oob_wake(struct hci_dev *hdev) } #endif +static void btusb_check_needs_reset_resume(struct usb_interface *intf) +{ + if (dmi_check_system(btusb_needs_reset_resume_table)) + interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME; +} + static int btusb_probe(struct usb_interface *intf, const struct usb_device_id *id) { @@ -2974,9 +2987,6 @@ static int btusb_probe(struct usb_interface *intf, hdev->send = btusb_send_frame; hdev->notify = btusb_notify; - if (dmi_check_system(btusb_needs_reset_resume_table)) - interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME; - #ifdef CONFIG_PM err = btusb_config_oob_wake(hdev); if (err) @@ -3064,6 +3074,7 @@ static int btusb_probe(struct usb_interface *intf, data->setup_on_usb = btusb_setup_qca; hdev->set_bdaddr = btusb_set_bdaddr_ath3012; set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); + btusb_check_needs_reset_resume(intf); } #ifdef CONFIG_BT_HCIBTUSB_RTL diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index d1c0b60e9326..6dc177bf4c42 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -33,6 +33,7 @@ config HISILICON_LPC bool "Support for ISA I/O space on HiSilicon Hip06/7" depends on ARM64 && (ARCH_HISI || COMPILE_TEST) select INDIRECT_PIO + select MFD_CORE if ACPI 
help Driver to enable I/O access to devices attached to the Low Pin Count bus on the HiSilicon Hip06/7 SoC. diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 8327478effd0..bfc566d3f31a 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -2371,7 +2371,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi, if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT) return media_changed(cdi, 1); - if ((unsigned int)arg >= cdi->capacity) + if (arg >= cdi->capacity) return -EINVAL; info = kmalloc(sizeof(*info), GFP_KERNEL); diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c index c381c8e396fc..79d8c84693a1 100644 --- a/drivers/char/agp/uninorth-agp.c +++ b/drivers/char/agp/uninorth-agp.c @@ -195,7 +195,7 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int ty return 0; } -int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type) +static int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type) { size_t i; u32 *gp; @@ -470,7 +470,7 @@ static int uninorth_free_gatt_table(struct agp_bridge_data *bridge) return 0; } -void null_cache_flush(void) +static void null_cache_flush(void) { mb(); } diff --git a/drivers/char/random.c b/drivers/char/random.c index e027e7fa1472..cd888d4ee605 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -261,6 +261,7 @@ #include <linux/ptrace.h> #include <linux/workqueue.h> #include <linux/irq.h> +#include <linux/ratelimit.h> #include <linux/syscalls.h> #include <linux/completion.h> #include <linux/uuid.h> @@ -427,8 +428,9 @@ struct crng_state primary_crng = { * its value (from 0->1->2). */ static int crng_init = 0; -#define crng_ready() (likely(crng_init > 0)) +#define crng_ready() (likely(crng_init > 1)) static int crng_init_cnt = 0; +static unsigned long crng_global_init_time = 0; #define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE) static void _extract_crng(struct crng_state *crng, __u32 out[CHACHA20_BLOCK_WORDS]); @@ -437,6 +439,16 @@ static void _crng_backtrack_protect(struct crng_state *crng, static void process_random_ready_list(void); static void _get_random_bytes(void *buf, int nbytes); +static struct ratelimit_state unseeded_warning = + RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3); +static struct ratelimit_state urandom_warning = + RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3); + +static int ratelimit_disable __read_mostly; + +module_param_named(ratelimit_disable, ratelimit_disable, int, 0644); +MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression"); + /********************************************************************** * * OS independent entropy store. 
Here are the functions which handle @@ -787,6 +799,43 @@ static void crng_initialize(struct crng_state *crng) crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; } +#ifdef CONFIG_NUMA +static void do_numa_crng_init(struct work_struct *work) +{ + int i; + struct crng_state *crng; + struct crng_state **pool; + + pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL); + for_each_online_node(i) { + crng = kmalloc_node(sizeof(struct crng_state), + GFP_KERNEL | __GFP_NOFAIL, i); + spin_lock_init(&crng->lock); + crng_initialize(crng); + pool[i] = crng; + } + mb(); + if (cmpxchg(&crng_node_pool, NULL, pool)) { + for_each_node(i) + kfree(pool[i]); + kfree(pool); + } +} + +static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init); + +static void numa_crng_init(void) +{ + schedule_work(&numa_crng_init_work); +} +#else +static void numa_crng_init(void) {} +#endif + +/* + * crng_fast_load() can be called by code in the interrupt service + * path. So we can't afford to dilly-dally. + */ static int crng_fast_load(const char *cp, size_t len) { unsigned long flags; @@ -794,7 +843,7 @@ static int crng_fast_load(const char *cp, size_t len) if (!spin_trylock_irqsave(&primary_crng.lock, flags)) return 0; - if (crng_ready()) { + if (crng_init != 0) { spin_unlock_irqrestore(&primary_crng.lock, flags); return 0; } @@ -813,6 +862,51 @@ static int crng_fast_load(const char *cp, size_t len) return 1; } +/* + * crng_slow_load() is called by add_device_randomness, which has two + * attributes. (1) We can't trust the buffer passed to it is + * guaranteed to be unpredictable (so it might not have any entropy at + * all), and (2) it doesn't have the performance constraints of + * crng_fast_load(). + * + * So we do something more comprehensive which is guaranteed to touch + * all of the primary_crng's state, and which uses a LFSR with a + * period of 255 as part of the mixing algorithm. Finally, we do + * *not* advance crng_init_cnt since buffer we may get may be something + * like a fixed DMI table (for example), which might very well be + * unique to the machine, but is otherwise unvarying. 
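The period claim for that mixing LFSR can be checked in isolation. Below is a stand-alone user-space sketch (illustration only, not part of the patch): it iterates the same shift-and-XOR step with the 0xE1 tap mask, starting from a seed of 1, and reports how many steps it takes to return to the seed. The data-dependent perturbation that crng_slow_load() layers on top of the LFSR is deliberately left out.

#include <stdio.h>

int main(void)
{
	unsigned char lfsr = 1, tmp;
	unsigned int steps = 0;

	/* Same Galois LFSR step as crng_slow_load(): shift right and XOR in
	 * the 0xE1 tap mask whenever the bit shifted out was set. */
	do {
		tmp = lfsr;
		lfsr >>= 1;
		if (tmp & 1)
			lfsr ^= 0xE1;
		steps++;
	} while (lfsr != 1 && steps <= 256);

	printf("LFSR returned to its seed after %u steps\n", steps);
	return 0;
}

If the figure in the comment is right, this prints 255, meaning the step visits every non-zero byte value before repeating.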
+ */ +static int crng_slow_load(const char *cp, size_t len) +{ + unsigned long flags; + static unsigned char lfsr = 1; + unsigned char tmp; + unsigned i, max = CHACHA20_KEY_SIZE; + const char * src_buf = cp; + char * dest_buf = (char *) &primary_crng.state[4]; + + if (!spin_trylock_irqsave(&primary_crng.lock, flags)) + return 0; + if (crng_init != 0) { + spin_unlock_irqrestore(&primary_crng.lock, flags); + return 0; + } + if (len > max) + max = len; + + for (i = 0; i < max ; i++) { + tmp = lfsr; + lfsr >>= 1; + if (tmp & 1) + lfsr ^= 0xE1; + tmp = dest_buf[i % CHACHA20_KEY_SIZE]; + dest_buf[i % CHACHA20_KEY_SIZE] ^= src_buf[i % len] ^ lfsr; + lfsr += (tmp << 3) | (tmp >> 5); + } + spin_unlock_irqrestore(&primary_crng.lock, flags); + return 1; +} + static void crng_reseed(struct crng_state *crng, struct entropy_store *r) { unsigned long flags; @@ -831,7 +925,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) _crng_backtrack_protect(&primary_crng, buf.block, CHACHA20_KEY_SIZE); } - spin_lock_irqsave(&primary_crng.lock, flags); + spin_lock_irqsave(&crng->lock, flags); for (i = 0; i < 8; i++) { unsigned long rv; if (!arch_get_random_seed_long(&rv) && @@ -841,13 +935,26 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) } memzero_explicit(&buf, sizeof(buf)); crng->init_time = jiffies; - spin_unlock_irqrestore(&primary_crng.lock, flags); + spin_unlock_irqrestore(&crng->lock, flags); if (crng == &primary_crng && crng_init < 2) { invalidate_batched_entropy(); + numa_crng_init(); crng_init = 2; process_random_ready_list(); wake_up_interruptible(&crng_init_wait); pr_notice("random: crng init done\n"); + if (unseeded_warning.missed) { + pr_notice("random: %d get_random_xx warning(s) missed " + "due to ratelimiting\n", + unseeded_warning.missed); + unseeded_warning.missed = 0; + } + if (urandom_warning.missed) { + pr_notice("random: %d urandom warning(s) missed " + "due to ratelimiting\n", + urandom_warning.missed); + urandom_warning.missed = 0; + } } } @@ -856,8 +963,9 @@ static void _extract_crng(struct crng_state *crng, { unsigned long v, flags; - if (crng_init > 1 && - time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)) + if (crng_ready() && + (time_after(crng_global_init_time, crng->init_time) || + time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL))) crng_reseed(crng, crng == &primary_crng ? 
&input_pool : NULL); spin_lock_irqsave(&crng->lock, flags); if (arch_get_random_long(&v)) @@ -981,10 +1089,8 @@ void add_device_randomness(const void *buf, unsigned int size) unsigned long time = random_get_entropy() ^ jiffies; unsigned long flags; - if (!crng_ready()) { - crng_fast_load(buf, size); - return; - } + if (!crng_ready() && size) + crng_slow_load(buf, size); trace_add_device_randomness(size, _RET_IP_); spin_lock_irqsave(&input_pool.lock, flags); @@ -1139,7 +1245,7 @@ void add_interrupt_randomness(int irq, int irq_flags) fast_mix(fast_pool); add_interrupt_bench(cycles); - if (!crng_ready()) { + if (unlikely(crng_init == 0)) { if ((fast_pool->count >= 64) && crng_fast_load((char *) fast_pool->pool, sizeof(fast_pool->pool))) { @@ -1489,8 +1595,9 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, #ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM print_once = true; #endif - pr_notice("random: %s called from %pS with crng_init=%d\n", - func_name, caller, crng_init); + if (__ratelimit(&unseeded_warning)) + pr_notice("random: %s called from %pS with crng_init=%d\n", + func_name, caller, crng_init); } /* @@ -1680,28 +1787,14 @@ static void init_std_data(struct entropy_store *r) */ static int rand_initialize(void) { -#ifdef CONFIG_NUMA - int i; - struct crng_state *crng; - struct crng_state **pool; -#endif - init_std_data(&input_pool); init_std_data(&blocking_pool); crng_initialize(&primary_crng); - -#ifdef CONFIG_NUMA - pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL); - for_each_online_node(i) { - crng = kmalloc_node(sizeof(struct crng_state), - GFP_KERNEL | __GFP_NOFAIL, i); - spin_lock_init(&crng->lock); - crng_initialize(crng); - pool[i] = crng; + crng_global_init_time = jiffies; + if (ratelimit_disable) { + urandom_warning.interval = 0; + unseeded_warning.interval = 0; } - mb(); - crng_node_pool = pool; -#endif return 0; } early_initcall(rand_initialize); @@ -1769,9 +1862,10 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) if (!crng_ready() && maxwarn > 0) { maxwarn--; - printk(KERN_NOTICE "random: %s: uninitialized urandom read " - "(%zd bytes read)\n", - current->comm, nbytes); + if (__ratelimit(&urandom_warning)) + printk(KERN_NOTICE "random: %s: uninitialized " + "urandom read (%zd bytes read)\n", + current->comm, nbytes); spin_lock_irqsave(&primary_crng.lock, flags); crng_init_cnt = 0; spin_unlock_irqrestore(&primary_crng.lock, flags); @@ -1875,6 +1969,14 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) input_pool.entropy_count = 0; blocking_pool.entropy_count = 0; return 0; + case RNDRESEEDCRNG: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (crng_init < 2) + return -ENODATA; + crng_reseed(&primary_crng, NULL); + crng_global_init_time = jiffies - 1; + return 0; default: return -EINVAL; } @@ -2212,7 +2314,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count, { struct entropy_store *poolp = &input_pool; - if (!crng_ready()) { + if (unlikely(crng_init == 0)) { crng_fast_load(buffer, count); return; } diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 468f06134012..21085515814f 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -422,7 +422,7 @@ static void reclaim_dma_bufs(void) } } -static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size, +static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size, int pages) { struct port_buffer *buf; @@ -445,16 +445,16 @@ 
static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size, return buf; } - if (is_rproc_serial(vq->vdev)) { + if (is_rproc_serial(vdev)) { /* * Allocate DMA memory from ancestor. When a virtio * device is created by remoteproc, the DMA memory is * associated with the grandparent device: * vdev => rproc => platform-dev. */ - if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent) + if (!vdev->dev.parent || !vdev->dev.parent->parent) goto free_buf; - buf->dev = vq->vdev->dev.parent->parent; + buf->dev = vdev->dev.parent->parent; /* Increase device refcnt to avoid freeing it */ get_device(buf->dev); @@ -838,7 +838,7 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, count = min((size_t)(32 * 1024), count); - buf = alloc_buf(port->out_vq, count, 0); + buf = alloc_buf(port->portdev->vdev, count, 0); if (!buf) return -ENOMEM; @@ -957,7 +957,7 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe, if (ret < 0) goto error_out; - buf = alloc_buf(port->out_vq, 0, pipe->nrbufs); + buf = alloc_buf(port->portdev->vdev, 0, pipe->nrbufs); if (!buf) { ret = -ENOMEM; goto error_out; @@ -1374,7 +1374,7 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock) nr_added_bufs = 0; do { - buf = alloc_buf(vq, PAGE_SIZE, 0); + buf = alloc_buf(vq->vdev, PAGE_SIZE, 0); if (!buf) break; @@ -1402,7 +1402,6 @@ static int add_port(struct ports_device *portdev, u32 id) { char debugfs_name[16]; struct port *port; - struct port_buffer *buf; dev_t devt; unsigned int nr_added_bufs; int err; @@ -1513,8 +1512,6 @@ static int add_port(struct ports_device *portdev, u32 id) return 0; free_inbufs: - while ((buf = virtqueue_detach_unused_buf(port->in_vq))) - free_buf(buf, true); free_device: device_destroy(pdrvdata.class, port->dev->devt); free_cdev: @@ -1539,34 +1536,14 @@ static void remove_port(struct kref *kref) static void remove_port_data(struct port *port) { - struct port_buffer *buf; - spin_lock_irq(&port->inbuf_lock); /* Remove unused data this port might have received. */ discard_port_data(port); spin_unlock_irq(&port->inbuf_lock); - /* Remove buffers we queued up for the Host to send us data in. */ - do { - spin_lock_irq(&port->inbuf_lock); - buf = virtqueue_detach_unused_buf(port->in_vq); - spin_unlock_irq(&port->inbuf_lock); - if (buf) - free_buf(buf, true); - } while (buf); - spin_lock_irq(&port->outvq_lock); reclaim_consumed_buffers(port); spin_unlock_irq(&port->outvq_lock); - - /* Free pending buffers from the out-queue. 
*/ - do { - spin_lock_irq(&port->outvq_lock); - buf = virtqueue_detach_unused_buf(port->out_vq); - spin_unlock_irq(&port->outvq_lock); - if (buf) - free_buf(buf, true); - } while (buf); } /* @@ -1791,13 +1768,24 @@ static void control_work_handler(struct work_struct *work) spin_unlock(&portdev->c_ivq_lock); } +static void flush_bufs(struct virtqueue *vq, bool can_sleep) +{ + struct port_buffer *buf; + unsigned int len; + + while ((buf = virtqueue_get_buf(vq, &len))) + free_buf(buf, can_sleep); +} + static void out_intr(struct virtqueue *vq) { struct port *port; port = find_port_by_vq(vq->vdev->priv, vq); - if (!port) + if (!port) { + flush_bufs(vq, false); return; + } wake_up_interruptible(&port->waitqueue); } @@ -1808,8 +1796,10 @@ static void in_intr(struct virtqueue *vq) unsigned long flags; port = find_port_by_vq(vq->vdev->priv, vq); - if (!port) + if (!port) { + flush_bufs(vq, false); return; + } spin_lock_irqsave(&port->inbuf_lock, flags); port->inbuf = get_inbuf(port); @@ -1984,24 +1974,54 @@ static const struct file_operations portdev_fops = { static void remove_vqs(struct ports_device *portdev) { + struct virtqueue *vq; + + virtio_device_for_each_vq(portdev->vdev, vq) { + struct port_buffer *buf; + + flush_bufs(vq, true); + while ((buf = virtqueue_detach_unused_buf(vq))) + free_buf(buf, true); + } portdev->vdev->config->del_vqs(portdev->vdev); kfree(portdev->in_vqs); kfree(portdev->out_vqs); } -static void remove_controlq_data(struct ports_device *portdev) +static void virtcons_remove(struct virtio_device *vdev) { - struct port_buffer *buf; - unsigned int len; + struct ports_device *portdev; + struct port *port, *port2; - if (!use_multiport(portdev)) - return; + portdev = vdev->priv; - while ((buf = virtqueue_get_buf(portdev->c_ivq, &len))) - free_buf(buf, true); + spin_lock_irq(&pdrvdata_lock); + list_del(&portdev->list); + spin_unlock_irq(&pdrvdata_lock); - while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq))) - free_buf(buf, true); + /* Disable interrupts for vqs */ + vdev->config->reset(vdev); + /* Finish up work that's lined up */ + if (use_multiport(portdev)) + cancel_work_sync(&portdev->control_work); + else + cancel_work_sync(&portdev->config_work); + + list_for_each_entry_safe(port, port2, &portdev->ports, list) + unplug_port(port); + + unregister_chrdev(portdev->chr_major, "virtio-portsdev"); + + /* + * When yanking out a device, we immediately lose the + * (device-side) queues. So there's no point in keeping the + * guest side around till we drop our final reference. This + * also means that any ports which are in an open state will + * have to just stop using the port, as the vqs are going + * away. + */ + remove_vqs(portdev); + kfree(portdev); } /* @@ -2070,6 +2090,7 @@ static int virtcons_probe(struct virtio_device *vdev) spin_lock_init(&portdev->ports_lock); INIT_LIST_HEAD(&portdev->ports); + INIT_LIST_HEAD(&portdev->list); virtio_device_ready(portdev->vdev); @@ -2087,8 +2108,15 @@ static int virtcons_probe(struct virtio_device *vdev) if (!nr_added_bufs) { dev_err(&vdev->dev, "Error allocating buffers for control queue\n"); - err = -ENOMEM; - goto free_vqs; + /* + * The host might want to notify mgmt sw about device + * add failure. + */ + __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, + VIRTIO_CONSOLE_DEVICE_READY, 0); + /* Device was functional: we need full cleanup. 
*/ + virtcons_remove(vdev); + return -ENOMEM; } } else { /* @@ -2119,11 +2147,6 @@ static int virtcons_probe(struct virtio_device *vdev) return 0; -free_vqs: - /* The host might want to notify mgmt sw about device add failure */ - __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, - VIRTIO_CONSOLE_DEVICE_READY, 0); - remove_vqs(portdev); free_chrdev: unregister_chrdev(portdev->chr_major, "virtio-portsdev"); free: @@ -2132,43 +2155,6 @@ fail: return err; } -static void virtcons_remove(struct virtio_device *vdev) -{ - struct ports_device *portdev; - struct port *port, *port2; - - portdev = vdev->priv; - - spin_lock_irq(&pdrvdata_lock); - list_del(&portdev->list); - spin_unlock_irq(&pdrvdata_lock); - - /* Disable interrupts for vqs */ - vdev->config->reset(vdev); - /* Finish up work that's lined up */ - if (use_multiport(portdev)) - cancel_work_sync(&portdev->control_work); - else - cancel_work_sync(&portdev->config_work); - - list_for_each_entry_safe(port, port2, &portdev->ports, list) - unplug_port(port); - - unregister_chrdev(portdev->chr_major, "virtio-portsdev"); - - /* - * When yanking out a device, we immediately lose the - * (device-side) queues. So there's no point in keeping the - * guest side around till we drop our final reference. This - * also means that any ports which are in an open state will - * have to just stop using the port, as the vqs are going - * away. - */ - remove_controlq_data(portdev); - remove_vqs(portdev); - kfree(portdev); -} - static struct virtio_device_id id_table[] = { { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID }, { 0 }, @@ -2209,7 +2195,6 @@ static int virtcons_freeze(struct virtio_device *vdev) */ if (use_multiport(portdev)) virtqueue_disable_cb(portdev->c_ivq); - remove_controlq_data(portdev); list_for_each_entry(port, &portdev->ports, list) { virtqueue_disable_cb(port->in_vq); diff --git a/drivers/clk/clk-cs2000-cp.c b/drivers/clk/clk-cs2000-cp.c index c58019750b7e..a2f8c42e527a 100644 --- a/drivers/clk/clk-cs2000-cp.c +++ b/drivers/clk/clk-cs2000-cp.c @@ -541,7 +541,7 @@ probe_err: return ret; } -static int cs2000_resume(struct device *dev) +static int __maybe_unused cs2000_resume(struct device *dev) { struct cs2000_priv *priv = dev_get_drvdata(dev); diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c index ac4a042f8658..1628b93655ed 100644 --- a/drivers/clk/clk-mux.c +++ b/drivers/clk/clk-mux.c @@ -112,10 +112,18 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index) return 0; } +static int clk_mux_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct clk_mux *mux = to_clk_mux(hw); + + return clk_mux_determine_rate_flags(hw, req, mux->flags); +} + const struct clk_ops clk_mux_ops = { .get_parent = clk_mux_get_parent, .set_parent = clk_mux_set_parent, - .determine_rate = __clk_mux_determine_rate, + .determine_rate = clk_mux_determine_rate, }; EXPORT_SYMBOL_GPL(clk_mux_ops); diff --git a/drivers/clk/clk-stm32mp1.c b/drivers/clk/clk-stm32mp1.c index f1d5967b4b39..edd3cf451401 100644 --- a/drivers/clk/clk-stm32mp1.c +++ b/drivers/clk/clk-stm32mp1.c @@ -216,7 +216,7 @@ static const char * const usart1_src[] = { "pclk5", "pll3_q", "ck_hsi", "ck_csi", "pll4_q", "ck_hse" }; -const char * const usart234578_src[] = { +static const char * const usart234578_src[] = { "pclk1", "pll4_q", "ck_hsi", "ck_csi", "ck_hse" }; @@ -224,10 +224,6 @@ static const char * const usart6_src[] = { "pclk2", "pll4_q", "ck_hsi", "ck_csi", "ck_hse" }; -static const char * const dfsdm_src[] = { - "pclk2", "ck_mcu" -}; - static const char * const 
fdcan_src[] = { "ck_hse", "pll3_q", "pll4_q" }; @@ -316,10 +312,8 @@ struct stm32_clk_mgate { struct clock_config { u32 id; const char *name; - union { - const char *parent_name; - const char * const *parent_names; - }; + const char *parent_name; + const char * const *parent_names; int num_parents; unsigned long flags; void *cfg; @@ -469,7 +463,7 @@ static void mp1_gate_clk_disable(struct clk_hw *hw) } } -const struct clk_ops mp1_gate_clk_ops = { +static const struct clk_ops mp1_gate_clk_ops = { .enable = mp1_gate_clk_enable, .disable = mp1_gate_clk_disable, .is_enabled = clk_gate_is_enabled, @@ -698,7 +692,7 @@ static void mp1_mgate_clk_disable(struct clk_hw *hw) mp1_gate_clk_disable(hw); } -const struct clk_ops mp1_mgate_clk_ops = { +static const struct clk_ops mp1_mgate_clk_ops = { .enable = mp1_mgate_clk_enable, .disable = mp1_mgate_clk_disable, .is_enabled = clk_gate_is_enabled, @@ -732,7 +726,7 @@ static int clk_mmux_set_parent(struct clk_hw *hw, u8 index) return 0; } -const struct clk_ops clk_mmux_ops = { +static const struct clk_ops clk_mmux_ops = { .get_parent = clk_mmux_get_parent, .set_parent = clk_mmux_set_parent, .determine_rate = __clk_mux_determine_rate, @@ -1048,10 +1042,10 @@ struct stm32_pll_cfg { u32 offset; }; -struct clk_hw *_clk_register_pll(struct device *dev, - struct clk_hw_onecell_data *clk_data, - void __iomem *base, spinlock_t *lock, - const struct clock_config *cfg) +static struct clk_hw *_clk_register_pll(struct device *dev, + struct clk_hw_onecell_data *clk_data, + void __iomem *base, spinlock_t *lock, + const struct clock_config *cfg) { struct stm32_pll_cfg *stm_pll_cfg = cfg->cfg; @@ -1405,7 +1399,8 @@ enum { G_USBH, G_ETHSTP, G_RTCAPB, - G_TZC, + G_TZC1, + G_TZC2, G_TZPC, G_IWDG1, G_BSEC, @@ -1417,7 +1412,7 @@ enum { G_LAST }; -struct stm32_mgate mp1_mgate[G_LAST]; +static struct stm32_mgate mp1_mgate[G_LAST]; #define _K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags,\ _mgate, _ops)\ @@ -1440,7 +1435,7 @@ struct stm32_mgate mp1_mgate[G_LAST]; &mp1_mgate[_id], &mp1_mgate_clk_ops) /* Peripheral gates */ -struct stm32_gate_cfg per_gate_cfg[G_LAST] = { +static struct stm32_gate_cfg per_gate_cfg[G_LAST] = { /* Multi gates */ K_GATE(G_MDIO, RCC_APB1ENSETR, 31, 0), K_MGATE(G_DAC12, RCC_APB1ENSETR, 29, 0), @@ -1506,7 +1501,8 @@ struct stm32_gate_cfg per_gate_cfg[G_LAST] = { K_GATE(G_BSEC, RCC_APB5ENSETR, 16, 0), K_GATE(G_IWDG1, RCC_APB5ENSETR, 15, 0), K_GATE(G_TZPC, RCC_APB5ENSETR, 13, 0), - K_GATE(G_TZC, RCC_APB5ENSETR, 12, 0), + K_GATE(G_TZC2, RCC_APB5ENSETR, 12, 0), + K_GATE(G_TZC1, RCC_APB5ENSETR, 11, 0), K_GATE(G_RTCAPB, RCC_APB5ENSETR, 8, 0), K_MGATE(G_USART1, RCC_APB5ENSETR, 4, 0), K_MGATE(G_I2C6, RCC_APB5ENSETR, 3, 0), @@ -1600,7 +1596,7 @@ enum { M_LAST }; -struct stm32_mmux ker_mux[M_LAST]; +static struct stm32_mmux ker_mux[M_LAST]; #define _K_MUX(_id, _offset, _shift, _width, _mux_flags, _mmux, _ops)\ [_id] = {\ @@ -1623,7 +1619,7 @@ struct stm32_mmux ker_mux[M_LAST]; _K_MUX(_id, _offset, _shift, _width, _mux_flags,\ &ker_mux[_id], &clk_mmux_ops) -const struct stm32_mux_cfg ker_mux_cfg[M_LAST] = { +static const struct stm32_mux_cfg ker_mux_cfg[M_LAST] = { /* Kernel multi mux */ K_MMUX(M_SDMMC12, RCC_SDMMC12CKSELR, 0, 3, 0), K_MMUX(M_SPI23, RCC_SPI2S23CKSELR, 0, 3, 0), @@ -1860,7 +1856,8 @@ static const struct clock_config stm32mp1_clock_cfg[] = { PCLK(USART1, "usart1", "pclk5", 0, G_USART1), PCLK(RTCAPB, "rtcapb", "pclk5", CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, G_RTCAPB), - PCLK(TZC, "tzc", "pclk5", CLK_IGNORE_UNUSED, G_TZC), + PCLK(TZC1, "tzc1", 
"ck_axi", CLK_IGNORE_UNUSED, G_TZC1), + PCLK(TZC2, "tzc2", "ck_axi", CLK_IGNORE_UNUSED, G_TZC2), PCLK(TZPC, "tzpc", "pclk5", CLK_IGNORE_UNUSED, G_TZPC), PCLK(IWDG1, "iwdg1", "pclk5", 0, G_IWDG1), PCLK(BSEC, "bsec", "pclk5", CLK_IGNORE_UNUSED, G_BSEC), @@ -1916,8 +1913,7 @@ static const struct clock_config stm32mp1_clock_cfg[] = { KCLK(RNG1_K, "rng1_k", rng_src, 0, G_RNG1, M_RNG1), KCLK(RNG2_K, "rng2_k", rng_src, 0, G_RNG2, M_RNG2), KCLK(USBPHY_K, "usbphy_k", usbphy_src, 0, G_USBPHY, M_USBPHY), - KCLK(STGEN_K, "stgen_k", stgen_src, CLK_IGNORE_UNUSED, - G_STGEN, M_STGEN), + KCLK(STGEN_K, "stgen_k", stgen_src, CLK_IS_CRITICAL, G_STGEN, M_STGEN), KCLK(SPDIF_K, "spdif_k", spdif_src, 0, G_SPDIF, M_SPDIF), KCLK(SPI1_K, "spi1_k", spi123_src, 0, G_SPI1, M_SPI1), KCLK(SPI2_K, "spi2_k", spi123_src, 0, G_SPI2, M_SPI23), @@ -1948,8 +1944,8 @@ static const struct clock_config stm32mp1_clock_cfg[] = { KCLK(FDCAN_K, "fdcan_k", fdcan_src, 0, G_FDCAN, M_FDCAN), KCLK(SAI1_K, "sai1_k", sai_src, 0, G_SAI1, M_SAI1), KCLK(SAI2_K, "sai2_k", sai2_src, 0, G_SAI2, M_SAI2), - KCLK(SAI3_K, "sai3_k", sai_src, 0, G_SAI2, M_SAI3), - KCLK(SAI4_K, "sai4_k", sai_src, 0, G_SAI2, M_SAI4), + KCLK(SAI3_K, "sai3_k", sai_src, 0, G_SAI3, M_SAI3), + KCLK(SAI4_K, "sai4_k", sai_src, 0, G_SAI4, M_SAI4), KCLK(ADC12_K, "adc12_k", adc12_src, 0, G_ADC12, M_ADC12), KCLK(DSI_K, "dsi_k", dsi_src, 0, G_DSI, M_DSI), KCLK(ADFSDM_K, "adfsdm_k", sai_src, 0, G_ADFSDM, M_SAI1), @@ -1992,10 +1988,6 @@ static const struct clock_config stm32mp1_clock_cfg[] = { _DIV(RCC_MCO2CFGR, 4, 4, 0, NULL)), /* Debug clocks */ - FIXED_FACTOR(NO_ID, "ck_axi_div2", "ck_axi", 0, 1, 2), - - GATE(DBG, "ck_apb_dbg", "ck_axi_div2", 0, RCC_DBGCFGR, 8, 0), - GATE(CK_DBG, "ck_sys_dbg", "ck_axi", 0, RCC_DBGCFGR, 8, 0), COMPOSITE(CK_TRACE, "ck_trace", ck_trace_src, CLK_OPS_PARENT_ENABLE, diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index ea67ac81c6f9..7af555f0e60c 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -426,9 +426,9 @@ static bool mux_is_better_rate(unsigned long rate, unsigned long now, return now <= rate && now > best; } -static int -clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_rate_request *req, - unsigned long flags) +int clk_mux_determine_rate_flags(struct clk_hw *hw, + struct clk_rate_request *req, + unsigned long flags) { struct clk_core *core = hw->core, *parent, *best_parent = NULL; int i, num_parents, ret; @@ -488,6 +488,7 @@ out: return 0; } +EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags); struct clk *__clk_lookup(const char *name) { diff --git a/drivers/clk/meson/clk-regmap.c b/drivers/clk/meson/clk-regmap.c index 3645fdb62343..ab7a3556f5b2 100644 --- a/drivers/clk/meson/clk-regmap.c +++ b/drivers/clk/meson/clk-regmap.c @@ -153,10 +153,19 @@ static int clk_regmap_mux_set_parent(struct clk_hw *hw, u8 index) val << mux->shift); } +static int clk_regmap_mux_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct clk_regmap *clk = to_clk_regmap(hw); + struct clk_regmap_mux_data *mux = clk_get_regmap_mux_data(clk); + + return clk_mux_determine_rate_flags(hw, req, mux->flags); +} + const struct clk_ops clk_regmap_mux_ops = { .get_parent = clk_regmap_mux_get_parent, .set_parent = clk_regmap_mux_set_parent, - .determine_rate = __clk_mux_determine_rate, + .determine_rate = clk_regmap_mux_determine_rate, }; EXPORT_SYMBOL_GPL(clk_regmap_mux_ops); diff --git a/drivers/clk/meson/gxbb-aoclk.h b/drivers/clk/meson/gxbb-aoclk.h index 0be78383f257..badc4c22b4ee 100644 --- a/drivers/clk/meson/gxbb-aoclk.h +++ 
b/drivers/clk/meson/gxbb-aoclk.h @@ -17,8 +17,6 @@ #define AO_RTC_ALT_CLK_CNTL0 0x94 #define AO_RTC_ALT_CLK_CNTL1 0x98 -extern const struct clk_ops meson_aoclk_gate_regmap_ops; - struct aoclk_cec_32k { struct clk_hw hw; struct regmap *regmap; diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c index cc2992493e0b..d0524ec71aad 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c @@ -253,7 +253,7 @@ static struct clk_fixed_factor meson8b_fclk_div3_div = { .mult = 1, .div = 3, .hw.init = &(struct clk_init_data){ - .name = "fclk_div_div3", + .name = "fclk_div3_div", .ops = &clk_fixed_factor_ops, .parent_names = (const char *[]){ "fixed_pll" }, .num_parents = 1, @@ -632,7 +632,8 @@ static struct clk_regmap meson8b_cpu_clk = { .hw.init = &(struct clk_init_data){ .name = "cpu_clk", .ops = &clk_regmap_mux_ro_ops, - .parent_names = (const char *[]){ "xtal", "cpu_out_sel" }, + .parent_names = (const char *[]){ "xtal", + "cpu_scale_out_sel" }, .num_parents = 2, .flags = (CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT), diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 9ee2888275c1..8e8a09755d10 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -133,6 +133,14 @@ config VT8500_TIMER help Enables support for the VT8500 driver. +config NPCM7XX_TIMER + bool "NPCM7xx timer driver" if COMPILE_TEST + depends on HAS_IOMEM + select CLKSRC_MMIO + help + Enable 24-bit TIMER0 and TIMER1 counters in the NPCM7xx architecture, + While TIMER0 serves as clockevent and TIMER1 serves as clocksource. + config CADENCE_TTC_TIMER bool "Cadence TTC timer driver" if COMPILE_TEST depends on COMMON_CLK diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index e8e76dfef00b..00caf37e52f9 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -56,6 +56,7 @@ obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o obj-$(CONFIG_OWL_TIMER) += owl-timer.o obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o +obj-$(CONFIG_NPCM7XX_TIMER) += timer-npcm7xx.o obj-$(CONFIG_ARC_TIMERS) += arc_timer.o obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c index 21bffdcb2f20..6c8318470b48 100644 --- a/drivers/clocksource/timer-imx-tpm.c +++ b/drivers/clocksource/timer-imx-tpm.c @@ -17,9 +17,14 @@ #include <linux/of_irq.h> #include <linux/sched_clock.h> +#define TPM_PARAM 0x4 +#define TPM_PARAM_WIDTH_SHIFT 16 +#define TPM_PARAM_WIDTH_MASK (0xff << 16) #define TPM_SC 0x10 #define TPM_SC_CMOD_INC_PER_CNT (0x1 << 3) #define TPM_SC_CMOD_DIV_DEFAULT 0x3 +#define TPM_SC_CMOD_DIV_MAX 0x7 +#define TPM_SC_TOF_MASK (0x1 << 7) #define TPM_CNT 0x14 #define TPM_MOD 0x18 #define TPM_STATUS 0x1c @@ -29,8 +34,11 @@ #define TPM_C0SC_MODE_SHIFT 2 #define TPM_C0SC_MODE_MASK 0x3c #define TPM_C0SC_MODE_SW_COMPARE 0x4 +#define TPM_C0SC_CHF_MASK (0x1 << 7) #define TPM_C0V 0x24 +static int counter_width; +static int rating; static void __iomem *timer_base; static struct clock_event_device clockevent_tpm; @@ -83,10 +91,11 @@ static int __init tpm_clocksource_init(unsigned long rate) tpm_delay_timer.freq = rate; register_current_timer_delay(&tpm_delay_timer); - sched_clock_register(tpm_read_sched_clock, 32, rate); + sched_clock_register(tpm_read_sched_clock, counter_width, rate); return clocksource_mmio_init(timer_base + TPM_CNT, "imx-tpm", - rate, 200, 32, clocksource_mmio_readl_up); + rate, rating, counter_width, + 
clocksource_mmio_readl_up); } static int tpm_set_next_event(unsigned long delta, @@ -105,7 +114,7 @@ static int tpm_set_next_event(unsigned long delta, * of writing CNT registers which may cause the min_delta event got * missed, so we need add a ETIME check here in case it happened. */ - return (int)((next - now) <= 0) ? -ETIME : 0; + return (int)(next - now) <= 0 ? -ETIME : 0; } static int tpm_set_state_oneshot(struct clock_event_device *evt) @@ -139,7 +148,6 @@ static struct clock_event_device clockevent_tpm = { .set_state_oneshot = tpm_set_state_oneshot, .set_next_event = tpm_set_next_event, .set_state_shutdown = tpm_set_state_shutdown, - .rating = 200, }; static int __init tpm_clockevent_init(unsigned long rate, int irq) @@ -149,10 +157,11 @@ static int __init tpm_clockevent_init(unsigned long rate, int irq) ret = request_irq(irq, tpm_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL, "i.MX7ULP TPM Timer", &clockevent_tpm); + clockevent_tpm.rating = rating; clockevent_tpm.cpumask = cpumask_of(0); clockevent_tpm.irq = irq; - clockevents_config_and_register(&clockevent_tpm, - rate, 300, 0xfffffffe); + clockevents_config_and_register(&clockevent_tpm, rate, 300, + GENMASK(counter_width - 1, 1)); return ret; } @@ -179,7 +188,7 @@ static int __init tpm_timer_init(struct device_node *np) ipg = of_clk_get_by_name(np, "ipg"); per = of_clk_get_by_name(np, "per"); if (IS_ERR(ipg) || IS_ERR(per)) { - pr_err("tpm: failed to get igp or per clk\n"); + pr_err("tpm: failed to get ipg or per clk\n"); ret = -ENODEV; goto err_clk_get; } @@ -197,6 +206,11 @@ static int __init tpm_timer_init(struct device_node *np) goto err_per_clk_enable; } + counter_width = (readl(timer_base + TPM_PARAM) & TPM_PARAM_WIDTH_MASK) + >> TPM_PARAM_WIDTH_SHIFT; + /* use rating 200 for 32-bit counter and 150 for 16-bit counter */ + rating = counter_width == 0x20 ? 200 : 150; + /* * Initialize tpm module to a known state * 1) Counter disabled @@ -205,16 +219,25 @@ static int __init tpm_timer_init(struct device_node *np) * 4) Channel0 disabled * 5) DMA transfers disabled */ + /* make sure counter is disabled */ writel(0, timer_base + TPM_SC); + /* TOF is W1C */ + writel(TPM_SC_TOF_MASK, timer_base + TPM_SC); writel(0, timer_base + TPM_CNT); - writel(0, timer_base + TPM_C0SC); + /* CHF is W1C */ + writel(TPM_C0SC_CHF_MASK, timer_base + TPM_C0SC); - /* increase per cnt, div 8 by default */ - writel(TPM_SC_CMOD_INC_PER_CNT | TPM_SC_CMOD_DIV_DEFAULT, + /* + * increase per cnt, + * div 8 for 32-bit counter and div 128 for 16-bit counter + */ + writel(TPM_SC_CMOD_INC_PER_CNT | + (counter_width == 0x20 ? + TPM_SC_CMOD_DIV_DEFAULT : TPM_SC_CMOD_DIV_MAX), timer_base + TPM_SC); /* set MOD register to maximum for free running mode */ - writel(0xffffffff, timer_base + TPM_MOD); + writel(GENMASK(counter_width - 1, 0), timer_base + TPM_MOD); rate = clk_get_rate(per) >> 3; ret = tpm_clocksource_init(rate); diff --git a/drivers/clocksource/timer-npcm7xx.c b/drivers/clocksource/timer-npcm7xx.c new file mode 100644 index 000000000000..7a9bb5532d99 --- /dev/null +++ b/drivers/clocksource/timer-npcm7xx.c @@ -0,0 +1,215 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2014-2018 Nuvoton Technologies tomer.maimon@nuvoton.com + * All rights reserved. + * + * Copyright 2017 Google, Inc. 
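One detail from the i.MX TPM hunk above deserves a note: tpm_set_next_event() works on free-running counter values that wrap, so the check for an already-expired deadline only works once the unsigned difference is reinterpreted as a signed quantity before comparing against zero; the unsigned expression used previously could only ever be true when the two values were exactly equal. A stand-alone illustration with made-up counter values (5 and 10 are arbitrary, and the fixed-width types are for the example only):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t next = 5, now = 10;	/* the programmed deadline has already passed */

	/* An unsigned difference is never negative, so this form only
	 * detects the exact-equality case and the missed event is lost. */
	printf("unsigned compare: %d\n", (next - now) <= 0);

	/* Reinterpreting the wrapped difference as signed makes
	 * "now is at or past next" detectable, so -ETIME can be returned. */
	printf("signed compare:   %d\n", (int32_t)(next - now) <= 0);

	return 0;
}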
+ */ + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/err.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/clockchips.h> +#include <linux/of_irq.h> +#include <linux/of_address.h> +#include "timer-of.h" + +/* Timers registers */ +#define NPCM7XX_REG_TCSR0 0x0 /* Timer 0 Control and Status Register */ +#define NPCM7XX_REG_TICR0 0x8 /* Timer 0 Initial Count Register */ +#define NPCM7XX_REG_TCSR1 0x4 /* Timer 1 Control and Status Register */ +#define NPCM7XX_REG_TICR1 0xc /* Timer 1 Initial Count Register */ +#define NPCM7XX_REG_TDR1 0x14 /* Timer 1 Data Register */ +#define NPCM7XX_REG_TISR 0x18 /* Timer Interrupt Status Register */ + +/* Timers control */ +#define NPCM7XX_Tx_RESETINT 0x1f +#define NPCM7XX_Tx_PERIOD BIT(27) +#define NPCM7XX_Tx_INTEN BIT(29) +#define NPCM7XX_Tx_COUNTEN BIT(30) +#define NPCM7XX_Tx_ONESHOT 0x0 +#define NPCM7XX_Tx_OPER GENMASK(3, 27) +#define NPCM7XX_Tx_MIN_PRESCALE 0x1 +#define NPCM7XX_Tx_TDR_MASK_BITS 24 +#define NPCM7XX_Tx_MAX_CNT 0xFFFFFF +#define NPCM7XX_T0_CLR_INT 0x1 +#define NPCM7XX_Tx_CLR_CSR 0x0 + +/* Timers operating mode */ +#define NPCM7XX_START_PERIODIC_Tx (NPCM7XX_Tx_PERIOD | NPCM7XX_Tx_COUNTEN | \ + NPCM7XX_Tx_INTEN | \ + NPCM7XX_Tx_MIN_PRESCALE) + +#define NPCM7XX_START_ONESHOT_Tx (NPCM7XX_Tx_ONESHOT | NPCM7XX_Tx_COUNTEN | \ + NPCM7XX_Tx_INTEN | \ + NPCM7XX_Tx_MIN_PRESCALE) + +#define NPCM7XX_START_Tx (NPCM7XX_Tx_COUNTEN | NPCM7XX_Tx_PERIOD | \ + NPCM7XX_Tx_MIN_PRESCALE) + +#define NPCM7XX_DEFAULT_CSR (NPCM7XX_Tx_CLR_CSR | NPCM7XX_Tx_MIN_PRESCALE) + +static int npcm7xx_timer_resume(struct clock_event_device *evt) +{ + struct timer_of *to = to_timer_of(evt); + u32 val; + + val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); + val |= NPCM7XX_Tx_COUNTEN; + writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0); + + return 0; +} + +static int npcm7xx_timer_shutdown(struct clock_event_device *evt) +{ + struct timer_of *to = to_timer_of(evt); + u32 val; + + val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); + val &= ~NPCM7XX_Tx_COUNTEN; + writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0); + + return 0; +} + +static int npcm7xx_timer_oneshot(struct clock_event_device *evt) +{ + struct timer_of *to = to_timer_of(evt); + u32 val; + + val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); + val &= ~NPCM7XX_Tx_OPER; + + val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); + val |= NPCM7XX_START_ONESHOT_Tx; + writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0); + + return 0; +} + +static int npcm7xx_timer_periodic(struct clock_event_device *evt) +{ + struct timer_of *to = to_timer_of(evt); + u32 val; + + val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); + val &= ~NPCM7XX_Tx_OPER; + + writel(timer_of_period(to), timer_of_base(to) + NPCM7XX_REG_TICR0); + val |= NPCM7XX_START_PERIODIC_Tx; + + writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0); + + return 0; +} + +static int npcm7xx_clockevent_set_next_event(unsigned long evt, + struct clock_event_device *clk) +{ + struct timer_of *to = to_timer_of(clk); + u32 val; + + writel(evt, timer_of_base(to) + NPCM7XX_REG_TICR0); + val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); + val |= NPCM7XX_START_Tx; + writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0); + + return 0; +} + +static irqreturn_t npcm7xx_timer0_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *evt = (struct clock_event_device *)dev_id; + struct timer_of *to = to_timer_of(evt); + + writel(NPCM7XX_T0_CLR_INT, timer_of_base(to) + NPCM7XX_REG_TISR); + 
+ evt->event_handler(evt); + + return IRQ_HANDLED; +} + +static struct timer_of npcm7xx_to = { + .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK, + + .clkevt = { + .name = "npcm7xx-timer0", + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + .set_next_event = npcm7xx_clockevent_set_next_event, + .set_state_shutdown = npcm7xx_timer_shutdown, + .set_state_periodic = npcm7xx_timer_periodic, + .set_state_oneshot = npcm7xx_timer_oneshot, + .tick_resume = npcm7xx_timer_resume, + .rating = 300, + }, + + .of_irq = { + .handler = npcm7xx_timer0_interrupt, + .flags = IRQF_TIMER | IRQF_IRQPOLL, + }, +}; + +static void __init npcm7xx_clockevents_init(void) +{ + writel(NPCM7XX_DEFAULT_CSR, + timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR0); + + writel(NPCM7XX_Tx_RESETINT, + timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TISR); + + npcm7xx_to.clkevt.cpumask = cpumask_of(0); + clockevents_config_and_register(&npcm7xx_to.clkevt, + timer_of_rate(&npcm7xx_to), + 0x1, NPCM7XX_Tx_MAX_CNT); +} + +static void __init npcm7xx_clocksource_init(void) +{ + u32 val; + + writel(NPCM7XX_DEFAULT_CSR, + timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1); + writel(NPCM7XX_Tx_MAX_CNT, + timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TICR1); + + val = readl(timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1); + val |= NPCM7XX_START_Tx; + writel(val, timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1); + + clocksource_mmio_init(timer_of_base(&npcm7xx_to) + + NPCM7XX_REG_TDR1, + "npcm7xx-timer1", timer_of_rate(&npcm7xx_to), + 200, (unsigned int)NPCM7XX_Tx_TDR_MASK_BITS, + clocksource_mmio_readl_down); +} + +static int __init npcm7xx_timer_init(struct device_node *np) +{ + int ret; + + ret = timer_of_init(np, &npcm7xx_to); + if (ret) + return ret; + + /* Clock input is divided by PRESCALE + 1 before it is fed */ + /* to the counter */ + npcm7xx_to.of_clk.rate = npcm7xx_to.of_clk.rate / + (NPCM7XX_Tx_MIN_PRESCALE + 1); + + npcm7xx_clocksource_init(); + npcm7xx_clockevents_init(); + + pr_info("Enabling NPCM7xx clocksource timer base: %px, IRQ: %d ", + timer_of_base(&npcm7xx_to), timer_of_irq(&npcm7xx_to)); + + return 0; +} + +TIMER_OF_DECLARE(npcm7xx, "nuvoton,npcm750-timer", npcm7xx_timer_init); + diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 7f56fe5183f2..de55c7d57438 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -71,16 +71,6 @@ config ARM_BRCMSTB_AVS_CPUFREQ Say Y, if you have a Broadcom SoC with AVS support for DFS or DVFS. -config ARM_BRCMSTB_AVS_CPUFREQ_DEBUG - bool "Broadcom STB AVS CPUfreq driver sysfs debug capability" - depends on ARM_BRCMSTB_AVS_CPUFREQ - help - Enabling this option turns on debug support via sysfs under - /sys/kernel/debug/brcmstb-avs-cpufreq. It is possible to read all and - write some AVS mailbox registers through sysfs entries. - - If in doubt, say N. 
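Returning to the NPCM7xx timer added above: TIMER0 and TIMER1 are 24-bit counters and the driver feeds them the input clock divided by NPCM7XX_Tx_MIN_PRESCALE + 1, so the tick resolution and the longest programmable one-shot interval follow directly from the reference clock rate. A stand-alone back-of-the-envelope sketch, assuming a hypothetical 25 MHz input clock (the patch does not hard-code the rate; it is taken from the clock tree via timer_of at runtime):

#include <stdio.h>

#define NPCM7XX_Tx_MAX_CNT	0xFFFFFF	/* 24-bit counter */
#define NPCM7XX_Tx_MIN_PRESCALE	0x1

int main(void)
{
	double input_hz = 25000000.0;	/* assumed reference clock, not stated in the patch */
	double tick_hz = input_hz / (NPCM7XX_Tx_MIN_PRESCALE + 1);

	printf("tick rate      : %.1f MHz\n", tick_hz / 1e6);
	printf("tick resolution: %.0f ns\n", 1e9 / tick_hz);
	printf("max one-shot   : %.3f s\n", NPCM7XX_Tx_MAX_CNT / tick_hz);
	return 0;
}

With the assumed rate this gives an 80 ns tick and a maximum interval of about 1.34 s, which is what the 0x1 and NPCM7XX_Tx_MAX_CNT min/max deltas passed to clockevents_config_and_register() translate to at that rate.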
- config ARM_EXYNOS5440_CPUFREQ tristate "SAMSUNG EXYNOS5440" depends on SOC_EXYNOS5440 diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c index 6cdac1aaf23c..b07559b9ed99 100644 --- a/drivers/cpufreq/brcmstb-avs-cpufreq.c +++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c @@ -49,13 +49,6 @@ #include <linux/platform_device.h> #include <linux/semaphore.h> -#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG -#include <linux/ctype.h> -#include <linux/debugfs.h> -#include <linux/slab.h> -#include <linux/uaccess.h> -#endif - /* Max number of arguments AVS calls take */ #define AVS_MAX_CMD_ARGS 4 /* @@ -182,88 +175,11 @@ struct private_data { void __iomem *base; void __iomem *avs_intr_base; struct device *dev; -#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG - struct dentry *debugfs; -#endif struct completion done; struct semaphore sem; struct pmap pmap; }; -#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG - -enum debugfs_format { - DEBUGFS_NORMAL, - DEBUGFS_FLOAT, - DEBUGFS_REV, -}; - -struct debugfs_data { - struct debugfs_entry *entry; - struct private_data *priv; -}; - -struct debugfs_entry { - char *name; - u32 offset; - fmode_t mode; - enum debugfs_format format; -}; - -#define DEBUGFS_ENTRY(name, mode, format) { \ - #name, AVS_MBOX_##name, mode, format \ -} - -/* - * These are used for debugfs only. Otherwise we use AVS_MBOX_PARAM() directly. - */ -#define AVS_MBOX_PARAM1 AVS_MBOX_PARAM(0) -#define AVS_MBOX_PARAM2 AVS_MBOX_PARAM(1) -#define AVS_MBOX_PARAM3 AVS_MBOX_PARAM(2) -#define AVS_MBOX_PARAM4 AVS_MBOX_PARAM(3) - -/* - * This table stores the name, access permissions and offset for each hardware - * register and is used to generate debugfs entries. - */ -static struct debugfs_entry debugfs_entries[] = { - DEBUGFS_ENTRY(COMMAND, S_IWUSR, DEBUGFS_NORMAL), - DEBUGFS_ENTRY(STATUS, S_IWUSR, DEBUGFS_NORMAL), - DEBUGFS_ENTRY(VOLTAGE0, 0, DEBUGFS_FLOAT), - DEBUGFS_ENTRY(TEMP0, 0, DEBUGFS_FLOAT), - DEBUGFS_ENTRY(PV0, 0, DEBUGFS_FLOAT), - DEBUGFS_ENTRY(MV0, 0, DEBUGFS_FLOAT), - DEBUGFS_ENTRY(PARAM1, S_IWUSR, DEBUGFS_NORMAL), - DEBUGFS_ENTRY(PARAM2, S_IWUSR, DEBUGFS_NORMAL), - DEBUGFS_ENTRY(PARAM3, S_IWUSR, DEBUGFS_NORMAL), - DEBUGFS_ENTRY(PARAM4, S_IWUSR, DEBUGFS_NORMAL), - DEBUGFS_ENTRY(REVISION, 0, DEBUGFS_REV), - DEBUGFS_ENTRY(PSTATE, 0, DEBUGFS_NORMAL), - DEBUGFS_ENTRY(HEARTBEAT, 0, DEBUGFS_NORMAL), - DEBUGFS_ENTRY(MAGIC, S_IWUSR, DEBUGFS_NORMAL), - DEBUGFS_ENTRY(SIGMA_HVT, 0, DEBUGFS_NORMAL), - DEBUGFS_ENTRY(SIGMA_SVT, 0, DEBUGFS_NORMAL), - DEBUGFS_ENTRY(VOLTAGE1, 0, DEBUGFS_FLOAT), - DEBUGFS_ENTRY(TEMP1, 0, DEBUGFS_FLOAT), - DEBUGFS_ENTRY(PV1, 0, DEBUGFS_FLOAT), - DEBUGFS_ENTRY(MV1, 0, DEBUGFS_FLOAT), - DEBUGFS_ENTRY(FREQUENCY, 0, DEBUGFS_NORMAL), -}; - -static int brcm_avs_target_index(struct cpufreq_policy *, unsigned int); - -static char *__strtolower(char *s) -{ - char *p; - - for (p = s; *p; p++) - *p = tolower(*p); - - return s; -} - -#endif /* CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG */ - static void __iomem *__map_region(const char *name) { struct device_node *np; @@ -516,238 +432,6 @@ brcm_avs_get_freq_table(struct device *dev, struct private_data *priv) return table; } -#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG - -#define MANT(x) (unsigned int)(abs((x)) / 1000) -#define FRAC(x) (unsigned int)(abs((x)) - abs((x)) / 1000 * 1000) - -static int brcm_avs_debug_show(struct seq_file *s, void *data) -{ - struct debugfs_data *dbgfs = s->private; - void __iomem *base; - u32 val, offset; - - if (!dbgfs) { - seq_puts(s, "No device pointer\n"); - return 0; - } - - base = 
dbgfs->priv->base; - offset = dbgfs->entry->offset; - val = readl(base + offset); - switch (dbgfs->entry->format) { - case DEBUGFS_NORMAL: - seq_printf(s, "%u\n", val); - break; - case DEBUGFS_FLOAT: - seq_printf(s, "%d.%03d\n", MANT(val), FRAC(val)); - break; - case DEBUGFS_REV: - seq_printf(s, "%c.%c.%c.%c\n", (val >> 24 & 0xff), - (val >> 16 & 0xff), (val >> 8 & 0xff), - val & 0xff); - break; - } - seq_printf(s, "0x%08x\n", val); - - return 0; -} - -#undef MANT -#undef FRAC - -static ssize_t brcm_avs_seq_write(struct file *file, const char __user *buf, - size_t size, loff_t *ppos) -{ - struct seq_file *s = file->private_data; - struct debugfs_data *dbgfs = s->private; - struct private_data *priv = dbgfs->priv; - void __iomem *base, *avs_intr_base; - bool use_issue_command = false; - unsigned long val, offset; - char str[128]; - int ret; - char *str_ptr = str; - - if (size >= sizeof(str)) - return -E2BIG; - - memset(str, 0, sizeof(str)); - ret = copy_from_user(str, buf, size); - if (ret) - return ret; - - base = priv->base; - avs_intr_base = priv->avs_intr_base; - offset = dbgfs->entry->offset; - /* - * Special case writing to "command" entry only: if the string starts - * with a 'c', we use the driver's __issue_avs_command() function. - * Otherwise, we perform a raw write. This should allow testing of raw - * access as well as using the higher level function. (Raw access - * doesn't clear the firmware return status after issuing the command.) - */ - if (str_ptr[0] == 'c' && offset == AVS_MBOX_COMMAND) { - use_issue_command = true; - str_ptr++; - } - if (kstrtoul(str_ptr, 0, &val) != 0) - return -EINVAL; - - /* - * Setting the P-state is a special case. We need to update the CPU - * frequency we report. - */ - if (val == AVS_CMD_SET_PSTATE) { - struct cpufreq_policy *policy; - unsigned int pstate; - - policy = cpufreq_cpu_get(smp_processor_id()); - /* Read back the P-state we are about to set */ - pstate = readl(base + AVS_MBOX_PARAM(0)); - if (use_issue_command) { - ret = brcm_avs_target_index(policy, pstate); - return ret ? ret : size; - } - policy->cur = policy->freq_table[pstate].frequency; - } - - if (use_issue_command) { - ret = __issue_avs_command(priv, val, false, NULL); - } else { - /* Locking here is not perfect, but is only for debug. */ - ret = down_interruptible(&priv->sem); - if (ret) - return ret; - - writel(val, base + offset); - /* We have to wake up the firmware to process a command. */ - if (offset == AVS_MBOX_COMMAND) - writel(AVS_CPU_L2_INT_MASK, - avs_intr_base + AVS_CPU_L2_SET0); - up(&priv->sem); - } - - return ret ? ret : size; -} - -static struct debugfs_entry *__find_debugfs_entry(const char *name) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++) - if (strcasecmp(debugfs_entries[i].name, name) == 0) - return &debugfs_entries[i]; - - return NULL; -} - -static int brcm_avs_debug_open(struct inode *inode, struct file *file) -{ - struct debugfs_data *data; - fmode_t fmode; - int ret; - - /* - * seq_open(), which is called by single_open(), clears "write" access. - * We need write access to some files, so we preserve our access mode - * and restore it. - */ - fmode = file->f_mode; - /* - * Check access permissions even for root. We don't want to be writing - * to read-only registers. Access for regular users has already been - * checked by the VFS layer. 
- */ - if ((fmode & FMODE_WRITER) && !(inode->i_mode & S_IWUSR)) - return -EACCES; - - data = kmalloc(sizeof(*data), GFP_KERNEL); - if (!data) - return -ENOMEM; - /* - * We use the same file system operations for all our debug files. To - * produce specific output, we look up the file name upon opening a - * debugfs entry and map it to a memory offset. This offset is then used - * in the generic "show" function to read a specific register. - */ - data->entry = __find_debugfs_entry(file->f_path.dentry->d_iname); - data->priv = inode->i_private; - - ret = single_open(file, brcm_avs_debug_show, data); - if (ret) - kfree(data); - file->f_mode = fmode; - - return ret; -} - -static int brcm_avs_debug_release(struct inode *inode, struct file *file) -{ - struct seq_file *seq_priv = file->private_data; - struct debugfs_data *data = seq_priv->private; - - kfree(data); - return single_release(inode, file); -} - -static const struct file_operations brcm_avs_debug_ops = { - .open = brcm_avs_debug_open, - .read = seq_read, - .write = brcm_avs_seq_write, - .llseek = seq_lseek, - .release = brcm_avs_debug_release, -}; - -static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev) -{ - struct private_data *priv = platform_get_drvdata(pdev); - struct dentry *dir; - int i; - - if (!priv) - return; - - dir = debugfs_create_dir(BRCM_AVS_CPUFREQ_NAME, NULL); - if (IS_ERR_OR_NULL(dir)) - return; - priv->debugfs = dir; - - for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++) { - /* - * The DEBUGFS_ENTRY macro generates uppercase strings. We - * convert them to lowercase before creating the debugfs - * entries. - */ - char *entry = __strtolower(debugfs_entries[i].name); - fmode_t mode = debugfs_entries[i].mode; - - if (!debugfs_create_file(entry, S_IFREG | S_IRUGO | mode, - dir, priv, &brcm_avs_debug_ops)) { - priv->debugfs = NULL; - debugfs_remove_recursive(dir); - break; - } - } -} - -static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev) -{ - struct private_data *priv = platform_get_drvdata(pdev); - - if (priv && priv->debugfs) { - debugfs_remove_recursive(priv->debugfs); - priv->debugfs = NULL; - } -} - -#else - -static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev) {} -static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev) {} - -#endif /* CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG */ - /* * To ensure the right firmware is running we need to * - check the MAGIC matches what we expect @@ -1016,11 +700,8 @@ static int brcm_avs_cpufreq_probe(struct platform_device *pdev) return ret; brcm_avs_driver.driver_data = pdev; - ret = cpufreq_register_driver(&brcm_avs_driver); - if (!ret) - brcm_avs_cpufreq_debug_init(pdev); - return ret; + return cpufreq_register_driver(&brcm_avs_driver); } static int brcm_avs_cpufreq_remove(struct platform_device *pdev) @@ -1032,8 +713,6 @@ static int brcm_avs_cpufreq_remove(struct platform_device *pdev) if (ret) return ret; - brcm_avs_cpufreq_debug_exit(pdev); - priv = platform_get_drvdata(pdev); iounmap(priv->base); iounmap(priv->avs_intr_base); diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index bc5fc1630876..b15115a48775 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -126,6 +126,49 @@ static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy) cpu->perf_caps.lowest_perf, cpu_num, ret); } +/* + * The PCC subspace describes the rate at which platform can accept commands + * on the shared PCC channel (including READs which do not count towards freq + * trasition 
requests), so ideally we need to use the PCC values as a fallback + * if we don't have a platform specific transition_delay_us + */ +#ifdef CONFIG_ARM64 +#include <asm/cputype.h> + +static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu) +{ + unsigned long implementor = read_cpuid_implementor(); + unsigned long part_num = read_cpuid_part_number(); + unsigned int delay_us = 0; + + switch (implementor) { + case ARM_CPU_IMP_QCOM: + switch (part_num) { + case QCOM_CPU_PART_FALKOR_V1: + case QCOM_CPU_PART_FALKOR: + delay_us = 10000; + break; + default: + delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC; + break; + } + break; + default: + delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC; + break; + } + + return delay_us; +} + +#else + +static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu) +{ + return cppc_get_transition_latency(cpu) / NSEC_PER_USEC; +} +#endif + static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) { struct cppc_cpudata *cpu; @@ -162,8 +205,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) cpu->perf_caps.highest_perf; policy->cpuinfo.max_freq = cppc_dmi_max_khz; - policy->transition_delay_us = cppc_get_transition_latency(cpu_num) / - NSEC_PER_USEC; + policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu_num); policy->shared_type = cpu->shared_type; if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c index 0591874856d3..54edaec1e608 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c @@ -679,6 +679,16 @@ void gpstate_timer_handler(struct timer_list *t) if (!spin_trylock(&gpstates->gpstate_lock)) return; + /* + * If the timer has migrated to the different cpu then bring + * it back to one of the policy->cpus + */ + if (!cpumask_test_cpu(raw_smp_processor_id(), policy->cpus)) { + gpstates->timer.expires = jiffies + msecs_to_jiffies(1); + add_timer_on(&gpstates->timer, cpumask_first(policy->cpus)); + spin_unlock(&gpstates->gpstate_lock); + return; + } /* * If PMCR was last updated was using fast_swtich then @@ -718,10 +728,8 @@ void gpstate_timer_handler(struct timer_list *t) if (gpstate_idx != gpstates->last_lpstate_idx) queue_gpstate_timer(gpstates); + set_pstate(&freq_data); spin_unlock(&gpstates->gpstate_lock); - - /* Timer may get migrated to a different cpu on cpu hot unplug */ - smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1); } /* diff --git a/drivers/dax/device.c b/drivers/dax/device.c index be8606457f27..aff2c1594220 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -19,6 +19,7 @@ #include <linux/dax.h> #include <linux/fs.h> #include <linux/mm.h> +#include <linux/mman.h> #include "dax-private.h" #include "dax.h" @@ -540,6 +541,7 @@ static const struct file_operations dax_fops = { .release = dax_release, .get_unmapped_area = dax_get_unmapped_area, .mmap = dax_mmap, + .mmap_supported_flags = MAP_SYNC, }; static void dev_dax_release(struct device *dev) diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c index e6f17825db79..2b90606452a2 100644 --- a/drivers/firmware/arm_scmi/clock.c +++ b/drivers/firmware/arm_scmi/clock.c @@ -284,7 +284,7 @@ scmi_clock_info_get(const struct scmi_handle *handle, u32 clk_id) struct clock_info *ci = handle->clk_priv; struct scmi_clock_info *clk = ci->clk + clk_id; - if (!clk->name || !clk->name[0]) + if (!clk->name[0]) return NULL; return clk; diff --git 
a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c index 14f14efdf0d5..06d212a3d49d 100644 --- a/drivers/fpga/altera-ps-spi.c +++ b/drivers/fpga/altera-ps-spi.c @@ -249,7 +249,7 @@ static int altera_ps_probe(struct spi_device *spi) conf->data = of_id->data; conf->spi = spi; - conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_HIGH); + conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_LOW); if (IS_ERR(conf->config)) { dev_err(&spi->dev, "Failed to get config gpio: %ld\n", PTR_ERR(conf->config)); diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c index 77e485557498..6f693b7d5220 100644 --- a/drivers/gpio/gpio-aspeed.c +++ b/drivers/gpio/gpio-aspeed.c @@ -384,7 +384,7 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set) if (set) reg |= bit; else - reg &= bit; + reg &= ~bit; iowrite32(reg, addr); spin_unlock_irqrestore(&gpio->lock, flags); diff --git a/drivers/gpio/gpio-pci-idio-16.c b/drivers/gpio/gpio-pci-idio-16.c index 1948724d8c36..25d16b2af1c3 100644 --- a/drivers/gpio/gpio-pci-idio-16.c +++ b/drivers/gpio/gpio-pci-idio-16.c @@ -116,9 +116,9 @@ static int idio_16_gpio_get_multiple(struct gpio_chip *chip, unsigned long word_mask; const unsigned long port_mask = GENMASK(gpio_reg_size - 1, 0); unsigned long port_state; - u8 __iomem ports[] = { - idio16gpio->reg->out0_7, idio16gpio->reg->out8_15, - idio16gpio->reg->in0_7, idio16gpio->reg->in8_15, + void __iomem *ports[] = { + &idio16gpio->reg->out0_7, &idio16gpio->reg->out8_15, + &idio16gpio->reg->in0_7, &idio16gpio->reg->in8_15, }; /* clear bits array to a clean slate */ @@ -143,7 +143,7 @@ static int idio_16_gpio_get_multiple(struct gpio_chip *chip, } /* read bits from current gpio port */ - port_state = ioread8(ports + i); + port_state = ioread8(ports[i]); /* store acquired bits at respective bits array offset */ bits[word_index] |= port_state << word_offset; diff --git a/drivers/gpio/gpio-pcie-idio-24.c b/drivers/gpio/gpio-pcie-idio-24.c index 835607ecf658..f953541e7890 100644 --- a/drivers/gpio/gpio-pcie-idio-24.c +++ b/drivers/gpio/gpio-pcie-idio-24.c @@ -206,10 +206,10 @@ static int idio_24_gpio_get_multiple(struct gpio_chip *chip, unsigned long word_mask; const unsigned long port_mask = GENMASK(gpio_reg_size - 1, 0); unsigned long port_state; - u8 __iomem ports[] = { - idio24gpio->reg->out0_7, idio24gpio->reg->out8_15, - idio24gpio->reg->out16_23, idio24gpio->reg->in0_7, - idio24gpio->reg->in8_15, idio24gpio->reg->in16_23, + void __iomem *ports[] = { + &idio24gpio->reg->out0_7, &idio24gpio->reg->out8_15, + &idio24gpio->reg->out16_23, &idio24gpio->reg->in0_7, + &idio24gpio->reg->in8_15, &idio24gpio->reg->in16_23, }; const unsigned long out_mode_mask = BIT(1); @@ -217,7 +217,7 @@ static int idio_24_gpio_get_multiple(struct gpio_chip *chip, bitmap_zero(bits, chip->ngpio); /* get bits are evaluated a gpio port register at a time */ - for (i = 0; i < ARRAY_SIZE(ports); i++) { + for (i = 0; i < ARRAY_SIZE(ports) + 1; i++) { /* gpio offset in bits array */ bits_offset = i * gpio_reg_size; @@ -236,7 +236,7 @@ static int idio_24_gpio_get_multiple(struct gpio_chip *chip, /* read bits from current gpio port (port 6 is TTL GPIO) */ if (i < 6) - port_state = ioread8(ports + i); + port_state = ioread8(ports[i]); else if (ioread8(&idio24gpio->reg->ctl) & out_mode_mask) port_state = ioread8(&idio24gpio->reg->ttl_out0_7); else @@ -301,9 +301,9 @@ static void idio_24_gpio_set_multiple(struct gpio_chip *chip, const unsigned long port_mask = GENMASK(gpio_reg_size, 0); unsigned long 
flags; unsigned int out_state; - u8 __iomem ports[] = { - idio24gpio->reg->out0_7, idio24gpio->reg->out8_15, - idio24gpio->reg->out16_23 + void __iomem *ports[] = { + &idio24gpio->reg->out0_7, &idio24gpio->reg->out8_15, + &idio24gpio->reg->out16_23 }; const unsigned long out_mode_mask = BIT(1); const unsigned int ttl_offset = 48; @@ -327,9 +327,9 @@ static void idio_24_gpio_set_multiple(struct gpio_chip *chip, raw_spin_lock_irqsave(&idio24gpio->lock, flags); /* process output lines */ - out_state = ioread8(ports + i) & ~gpio_mask; + out_state = ioread8(ports[i]) & ~gpio_mask; out_state |= (*bits >> bits_offset) & gpio_mask; - iowrite8(out_state, ports + i); + iowrite8(out_state, ports[i]); raw_spin_unlock_irqrestore(&idio24gpio->lock, flags); } diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 43aeb07343ec..d8ccb500872f 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -497,7 +497,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip) struct gpiohandle_request handlereq; struct linehandle_state *lh; struct file *file; - int fd, i, ret; + int fd, i, count = 0, ret; u32 lflags; if (copy_from_user(&handlereq, ip, sizeof(handlereq))) @@ -558,6 +558,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip) if (ret) goto out_free_descs; lh->descs[i] = desc; + count = i; if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW) set_bit(FLAG_ACTIVE_LOW, &desc->flags); @@ -628,7 +629,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip) out_put_unused_fd: put_unused_fd(fd); out_free_descs: - for (; i >= 0; i--) + for (i = 0; i < count; i++) gpiod_free(lh->descs[i]); kfree(lh->label); out_free_lh: @@ -902,7 +903,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) desc = &gdev->descs[offset]; ret = gpiod_request(desc, le->label); if (ret) - goto out_free_desc; + goto out_free_label; le->desc = desc; le->eflags = eflags; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 09d35051fdd6..3fabf9f97022 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -419,9 +419,11 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id) if (other) { signed long r; - r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT); + r = dma_fence_wait(other, true); if (r < 0) { - DRM_ERROR("Error (%ld) waiting for fence!\n", r); + if (r != -ERESTARTSYS) + DRM_ERROR("Error (%ld) waiting for fence!\n", r); + return r; } } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index b0e591eaa71a..e14263fca1c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1459,10 +1459,11 @@ static const u32 sgpr_init_compute_shader[] = static const u32 vgpr_init_regs[] = { mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff, - mmCOMPUTE_RESOURCE_LIMITS, 0, + mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */ mmCOMPUTE_NUM_THREAD_X, 256*4, mmCOMPUTE_NUM_THREAD_Y, 1, mmCOMPUTE_NUM_THREAD_Z, 1, + mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */ mmCOMPUTE_PGM_RSRC2, 20, mmCOMPUTE_USER_DATA_0, 0xedcedc00, mmCOMPUTE_USER_DATA_1, 0xedcedc01, @@ -1479,10 +1480,11 @@ static const u32 vgpr_init_regs[] = static const u32 sgpr1_init_regs[] = { mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f, - mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, + mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */ 
mmCOMPUTE_NUM_THREAD_X, 256*5, mmCOMPUTE_NUM_THREAD_Y, 1, mmCOMPUTE_NUM_THREAD_Z, 1, + mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */ mmCOMPUTE_PGM_RSRC2, 20, mmCOMPUTE_USER_DATA_0, 0xedcedc00, mmCOMPUTE_USER_DATA_1, 0xedcedc01, @@ -1503,6 +1505,7 @@ static const u32 sgpr2_init_regs[] = mmCOMPUTE_NUM_THREAD_X, 256*5, mmCOMPUTE_NUM_THREAD_Y, 1, mmCOMPUTE_NUM_THREAD_Z, 1, + mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */ mmCOMPUTE_PGM_RSRC2, 20, mmCOMPUTE_USER_DATA_0, 0xedcedc00, mmCOMPUTE_USER_DATA_1, 0xedcedc01, diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig index ed2f06c9f346..3858820a0055 100644 --- a/drivers/gpu/drm/amd/amdkfd/Kconfig +++ b/drivers/gpu/drm/amd/amdkfd/Kconfig @@ -6,5 +6,6 @@ config HSA_AMD tristate "HSA kernel driver for AMD GPU devices" depends on DRM_AMDGPU && X86_64 imply AMD_IOMMU_V2 + select MMU_NOTIFIER help Enable this if you want to use HSA features on AMD GPU devices. diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index cd679cf1fd30..59808a39ecf4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -749,12 +749,13 @@ static int kfd_ioctl_get_clock_counters(struct file *filep, struct timespec64 time; dev = kfd_device_by_id(args->gpu_id); - if (dev == NULL) - return -EINVAL; - - /* Reading GPU clock counter from KGD */ - args->gpu_clock_counter = - dev->kfd2kgd->get_gpu_clock_counter(dev->kgd); + if (dev) + /* Reading GPU clock counter from KGD */ + args->gpu_clock_counter = + dev->kfd2kgd->get_gpu_clock_counter(dev->kgd); + else + /* Node without GPU resource */ + args->gpu_clock_counter = 0; /* No access to rdtsc. Using raw monotonic time */ getrawmonotonic64(&time); @@ -1147,7 +1148,7 @@ err_unlock: return ret; } -bool kfd_dev_is_large_bar(struct kfd_dev *dev) +static bool kfd_dev_is_large_bar(struct kfd_dev *dev) { struct kfd_local_mem_info mem_info; @@ -1421,7 +1422,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep, pdd = kfd_get_process_device_data(dev, p); if (!pdd) { - err = PTR_ERR(pdd); + err = -EINVAL; goto bind_process_to_device_failed; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 4e2f379ce217..1dd1142246c2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4557,6 +4557,7 @@ static int dm_update_crtcs_state(struct dc *dc, struct amdgpu_dm_connector *aconnector = NULL; struct drm_connector_state *new_con_state = NULL; struct dm_connector_state *dm_conn_state = NULL; + struct drm_plane_state *new_plane_state = NULL; new_stream = NULL; @@ -4564,6 +4565,13 @@ static int dm_update_crtcs_state(struct dc *dc, dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); acrtc = to_amdgpu_crtc(crtc); + new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary); + + if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) { + ret = -EINVAL; + goto fail; + } + aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); /* TODO This hack should go away */ @@ -4760,7 +4768,7 @@ static int dm_update_planes_state(struct dc *dc, if (!dm_old_crtc_state->stream) continue; - DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n", + DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n", plane->base.id, old_plane_crtc->base.id); if (!dc_remove_plane_from_context( diff --git 
a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index f6cb502c303f..25f064c01038 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -138,13 +138,6 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc) lut = (struct drm_color_lut *)blob->data; lut_size = blob->length / sizeof(struct drm_color_lut); - if (__is_lut_linear(lut, lut_size)) { - /* Set to bypass if lut is set to linear */ - stream->out_transfer_func->type = TF_TYPE_BYPASS; - stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR; - return 0; - } - gamma = dc_create_gamma(); if (!gamma) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index 490017df371d..4be21bf54749 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -329,14 +329,15 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev) { int src; struct irq_list_head *lh; + unsigned long irq_table_flags; DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n"); - for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) { - + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); /* The handler was removed from the table, * it means it is safe to flush all the 'work' * (because no code can schedule a new one). */ lh = &adev->dm.irq_handler_list_low_tab[src]; + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); flush_work(&lh->work); } } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 8291d74f26bc..4304d9e408b8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -83,21 +83,22 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ? 
I2C_MOT_TRUE : I2C_MOT_FALSE; enum ddc_result res; - ssize_t read_bytes; + uint32_t read_bytes = msg->size; if (WARN_ON(msg->size > 16)) return -E2BIG; switch (msg->request & ~DP_AUX_I2C_MOT) { case DP_AUX_NATIVE_READ: - read_bytes = dal_ddc_service_read_dpcd_data( + res = dal_ddc_service_read_dpcd_data( TO_DM_AUX(aux)->ddc_service, false, I2C_MOT_UNDEF, msg->address, msg->buffer, - msg->size); - return read_bytes; + msg->size, + &read_bytes); + break; case DP_AUX_NATIVE_WRITE: res = dal_ddc_service_write_dpcd_data( TO_DM_AUX(aux)->ddc_service, @@ -108,14 +109,15 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, msg->size); break; case DP_AUX_I2C_READ: - read_bytes = dal_ddc_service_read_dpcd_data( + res = dal_ddc_service_read_dpcd_data( TO_DM_AUX(aux)->ddc_service, true, mot, msg->address, msg->buffer, - msg->size); - return read_bytes; + msg->size, + &read_bytes); + break; case DP_AUX_I2C_WRITE: res = dal_ddc_service_write_dpcd_data( TO_DM_AUX(aux)->ddc_service, @@ -137,7 +139,9 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, r == DDC_RESULT_SUCESSFULL); #endif - return msg->size; + if (res != DDC_RESULT_SUCESSFULL) + return -EIO; + return read_bytes; } static enum drm_connector_status @@ -161,6 +165,11 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector) struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder; + if (amdgpu_dm_connector->edid) { + kfree(amdgpu_dm_connector->edid); + amdgpu_dm_connector->edid = NULL; + } + drm_encoder_cleanup(&amdgpu_encoder->base); kfree(amdgpu_encoder); drm_connector_cleanup(connector); @@ -181,28 +190,22 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { void dm_dp_mst_dc_sink_create(struct drm_connector *connector) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - struct edid *edid; struct dc_sink *dc_sink; struct dc_sink_init_data init_params = { .link = aconnector->dc_link, .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; + /* FIXME none of this is safe. 
we shouldn't touch aconnector here in + * atomic_check + */ + /* * TODO: Need to further figure out why ddc.algo is NULL while MST port exists */ if (!aconnector->port || !aconnector->port->aux.ddc.algo) return; - edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port); - - if (!edid) { - drm_mode_connector_update_edid_property( - &aconnector->base, - NULL); - return; - } - - aconnector->edid = edid; + ASSERT(aconnector->edid); dc_sink = dc_link_add_remote_sink( aconnector->dc_link, @@ -215,9 +218,6 @@ void dm_dp_mst_dc_sink_create(struct drm_connector *connector) amdgpu_dm_add_sink_to_freesync_module( connector, aconnector->edid); - - drm_mode_connector_update_edid_property( - &aconnector->base, aconnector->edid); } static int dm_dp_mst_get_modes(struct drm_connector *connector) @@ -230,10 +230,6 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) if (!aconnector->edid) { struct edid *edid; - struct dc_sink *dc_sink; - struct dc_sink_init_data init_params = { - .link = aconnector->dc_link, - .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port); if (!edid) { @@ -244,11 +240,17 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) } aconnector->edid = edid; + } + if (!aconnector->dc_sink) { + struct dc_sink *dc_sink; + struct dc_sink_init_data init_params = { + .link = aconnector->dc_link, + .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; dc_sink = dc_link_add_remote_sink( aconnector->dc_link, - (uint8_t *)edid, - (edid->extensions + 1) * EDID_LENGTH, + (uint8_t *)aconnector->edid, + (aconnector->edid->extensions + 1) * EDID_LENGTH, &init_params); dc_sink->priv = aconnector; @@ -256,12 +258,12 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) if (aconnector->dc_sink) amdgpu_dm_add_sink_to_freesync_module( - connector, edid); - - drm_mode_connector_update_edid_property( - &aconnector->base, edid); + connector, aconnector->edid); } + drm_mode_connector_update_edid_property( + &aconnector->base, aconnector->edid); + ret = drm_add_edid_modes(connector, aconnector->edid); return ret; @@ -424,14 +426,6 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; } - if (aconnector->edid) { - kfree(aconnector->edid); - aconnector->edid = NULL; - } - - drm_mode_connector_update_edid_property( - &aconnector->base, - NULL); aconnector->mst_connected = false; } diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 985fe8c22875..10a5807a7e8b 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -70,6 +70,10 @@ static enum bp_result get_firmware_info_v3_1( struct bios_parser *bp, struct dc_firmware_info *info); +static enum bp_result get_firmware_info_v3_2( + struct bios_parser *bp, + struct dc_firmware_info *info); + static struct atom_hpd_int_record *get_hpd_record(struct bios_parser *bp, struct atom_display_object_path_v2 *object); @@ -1321,9 +1325,11 @@ static enum bp_result bios_parser_get_firmware_info( case 3: switch (revision.minor) { case 1: - case 2: result = get_firmware_info_v3_1(bp, info); break; + case 2: + result = get_firmware_info_v3_2(bp, info); + break; default: break; } @@ -1383,6 +1389,84 @@ static enum bp_result get_firmware_info_v3_1( return BP_RESULT_OK; } +static enum bp_result get_firmware_info_v3_2( + 
struct bios_parser *bp, + struct dc_firmware_info *info) +{ + struct atom_firmware_info_v3_2 *firmware_info; + struct atom_display_controller_info_v4_1 *dce_info = NULL; + struct atom_common_table_header *header; + struct atom_data_revision revision; + struct atom_smu_info_v3_2 *smu_info_v3_2 = NULL; + struct atom_smu_info_v3_3 *smu_info_v3_3 = NULL; + + if (!info) + return BP_RESULT_BADINPUT; + + firmware_info = GET_IMAGE(struct atom_firmware_info_v3_2, + DATA_TABLES(firmwareinfo)); + + dce_info = GET_IMAGE(struct atom_display_controller_info_v4_1, + DATA_TABLES(dce_info)); + + if (!firmware_info || !dce_info) + return BP_RESULT_BADBIOSTABLE; + + memset(info, 0, sizeof(*info)); + + header = GET_IMAGE(struct atom_common_table_header, + DATA_TABLES(smu_info)); + get_atom_data_table_revision(header, &revision); + + if (revision.minor == 2) { + /* Vega12 */ + smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2, + DATA_TABLES(smu_info)); + + if (!smu_info_v3_2) + return BP_RESULT_BADBIOSTABLE; + + info->default_engine_clk = smu_info_v3_2->bootup_dcefclk_10khz * 10; + } else if (revision.minor == 3) { + /* Vega20 */ + smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3, + DATA_TABLES(smu_info)); + + if (!smu_info_v3_3) + return BP_RESULT_BADBIOSTABLE; + + info->default_engine_clk = smu_info_v3_3->bootup_dcefclk_10khz * 10; + } + + // We need to convert from 10KHz units into KHz units. + info->default_memory_clk = firmware_info->bootup_mclk_in10khz * 10; + + /* 27MHz for Vega10 & Vega12; 100MHz for Vega20 */ + info->pll_info.crystal_frequency = dce_info->dce_refclk_10khz * 10; + /* Hardcode frequency if BIOS gives no DCE Ref Clk */ + if (info->pll_info.crystal_frequency == 0) { + if (revision.minor == 2) + info->pll_info.crystal_frequency = 27000; + else if (revision.minor == 3) + info->pll_info.crystal_frequency = 100000; + } + /*dp_phy_ref_clk is not correct for atom_display_controller_info_v4_2, but we don't use it*/ + info->dp_phy_ref_clk = dce_info->dpphy_refclk_10khz * 10; + info->i2c_engine_ref_clk = dce_info->i2c_engine_refclk_10khz * 10; + + /* Get GPU PLL VCO Clock */ + if (bp->cmd_tbl.get_smu_clock_info != NULL) { + if (revision.minor == 2) + info->smu_gpu_pll_output_freq = + bp->cmd_tbl.get_smu_clock_info(bp, SMU9_SYSPLL0_ID) * 10; + else if (revision.minor == 3) + info->smu_gpu_pll_output_freq = + bp->cmd_tbl.get_smu_clock_info(bp, SMU11_SYSPLL3_0_ID) * 10; + } + + return BP_RESULT_OK; +} + static enum bp_result bios_parser_get_encoder_cap_info( struct dc_bios *dcb, struct graphics_object_id object_id, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index 49c2face1e7a..ae48d603ebd6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -629,13 +629,14 @@ bool dal_ddc_service_query_ddc_data( return ret; } -ssize_t dal_ddc_service_read_dpcd_data( +enum ddc_result dal_ddc_service_read_dpcd_data( struct ddc_service *ddc, bool i2c, enum i2c_mot_mode mot, uint32_t address, uint8_t *data, - uint32_t len) + uint32_t len, + uint32_t *read) { struct aux_payload read_payload = { .i2c_over_aux = i2c, @@ -652,6 +653,8 @@ ssize_t dal_ddc_service_read_dpcd_data( .mot = mot }; + *read = 0; + if (len > DEFAULT_AUX_MAX_DATA_SIZE) { BREAK_TO_DEBUGGER(); return DDC_RESULT_FAILED_INVALID_OPERATION; @@ -661,7 +664,8 @@ ssize_t dal_ddc_service_read_dpcd_data( ddc->ctx->i2caux, ddc->ddc_pin, &command)) { - return (ssize_t)command.payloads->length; + *read = 
command.payloads->length; + return DDC_RESULT_SUCESSFULL; } return DDC_RESULT_FAILED_OPERATION; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index ade5b8ee9c3c..132eef3826e2 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c @@ -66,8 +66,8 @@ struct dc_plane_state *dc_create_plane_state(struct dc *dc) { struct dc *core_dc = dc; - struct dc_plane_state *plane_state = kzalloc(sizeof(*plane_state), - GFP_KERNEL); + struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state), + GFP_KERNEL); if (NULL == plane_state) return NULL; @@ -120,7 +120,7 @@ static void dc_plane_state_free(struct kref *kref) { struct dc_plane_state *plane_state = container_of(kref, struct dc_plane_state, refcount); destruct(plane_state); - kfree(plane_state); + kvfree(plane_state); } void dc_plane_state_release(struct dc_plane_state *plane_state) @@ -136,7 +136,7 @@ void dc_gamma_retain(struct dc_gamma *gamma) static void dc_gamma_free(struct kref *kref) { struct dc_gamma *gamma = container_of(kref, struct dc_gamma, refcount); - kfree(gamma); + kvfree(gamma); } void dc_gamma_release(struct dc_gamma **gamma) @@ -147,7 +147,7 @@ void dc_gamma_release(struct dc_gamma **gamma) struct dc_gamma *dc_create_gamma(void) { - struct dc_gamma *gamma = kzalloc(sizeof(*gamma), GFP_KERNEL); + struct dc_gamma *gamma = kvzalloc(sizeof(*gamma), GFP_KERNEL); if (gamma == NULL) goto alloc_fail; @@ -167,7 +167,7 @@ void dc_transfer_func_retain(struct dc_transfer_func *tf) static void dc_transfer_func_free(struct kref *kref) { struct dc_transfer_func *tf = container_of(kref, struct dc_transfer_func, refcount); - kfree(tf); + kvfree(tf); } void dc_transfer_func_release(struct dc_transfer_func *tf) @@ -177,7 +177,7 @@ void dc_transfer_func_release(struct dc_transfer_func *tf) struct dc_transfer_func *dc_create_transfer_func(void) { - struct dc_transfer_func *tf = kzalloc(sizeof(*tf), GFP_KERNEL); + struct dc_transfer_func *tf = kvzalloc(sizeof(*tf), GFP_KERNEL); if (tf == NULL) goto alloc_fail; diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h index 090b7a8dd67b..30b3a08b91be 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h @@ -102,13 +102,14 @@ bool dal_ddc_service_query_ddc_data( uint8_t *read_buf, uint32_t read_size); -ssize_t dal_ddc_service_read_dpcd_data( +enum ddc_result dal_ddc_service_read_dpcd_data( struct ddc_service *ddc, bool i2c, enum i2c_mot_mode mot, uint32_t address, uint8_t *data, - uint32_t len); + uint32_t len, + uint32_t *read); enum ddc_result dal_ddc_service_write_dpcd_data( struct ddc_service *ddc, diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h index 9831cb5eaa7c..9b0a04f99ac8 100644 --- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h +++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h @@ -113,9 +113,14 @@ #define AI_GREENLAND_P_A0 1 #define AI_GREENLAND_P_A1 2 +#define AI_UNKNOWN 0xFF -#define ASICREV_IS_GREENLAND_M(eChipRev) (eChipRev < AI_UNKNOWN) -#define ASICREV_IS_GREENLAND_P(eChipRev) (eChipRev < AI_UNKNOWN) +#define AI_VEGA12_P_A0 20 +#define ASICREV_IS_GREENLAND_M(eChipRev) (eChipRev < AI_VEGA12_P_A0) +#define ASICREV_IS_GREENLAND_P(eChipRev) (eChipRev < AI_VEGA12_P_A0) + +#define ASICREV_IS_VEGA12_P(eChipRev) ((eChipRev >= AI_VEGA12_P_A0) && (eChipRev < AI_UNKNOWN)) 
+#define ASICREV_IS_VEGA12_p(eChipRev) ((eChipRev >= AI_VEGA12_P_A0) && (eChipRev < AI_UNKNOWN)) /* DCN1_0 */ #define INTERNAL_REV_RAVEN_A0 0x00 /* First spin of Raven */ diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index e7e374f56864..b3747a019deb 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -1093,19 +1093,19 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; - rgb_user = kzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS), - GFP_KERNEL); + rgb_user = kvzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS), + GFP_KERNEL); if (!rgb_user) goto rgb_user_alloc_fail; - rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS), - GFP_KERNEL); + rgb_regamma = kvzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS), + GFP_KERNEL); if (!rgb_regamma) goto rgb_regamma_alloc_fail; - axix_x = kzalloc(sizeof(*axix_x) * (ramp->num_entries + 3), - GFP_KERNEL); + axix_x = kvzalloc(sizeof(*axix_x) * (ramp->num_entries + 3), + GFP_KERNEL); if (!axix_x) goto axix_x_alloc_fail; - coeff = kzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL); + coeff = kvzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL); if (!coeff) goto coeff_alloc_fail; @@ -1157,13 +1157,13 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, ret = true; - kfree(coeff); + kvfree(coeff); coeff_alloc_fail: - kfree(axix_x); + kvfree(axix_x); axix_x_alloc_fail: - kfree(rgb_regamma); + kvfree(rgb_regamma); rgb_regamma_alloc_fail: - kfree(rgb_user); + kvfree(rgb_user); rgb_user_alloc_fail: return ret; } @@ -1192,19 +1192,19 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf, input_tf->type = TF_TYPE_DISTRIBUTED_POINTS; - rgb_user = kzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS), - GFP_KERNEL); + rgb_user = kvzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS), + GFP_KERNEL); if (!rgb_user) goto rgb_user_alloc_fail; - curve = kzalloc(sizeof(*curve) * (MAX_HW_POINTS + _EXTRA_POINTS), - GFP_KERNEL); + curve = kvzalloc(sizeof(*curve) * (MAX_HW_POINTS + _EXTRA_POINTS), + GFP_KERNEL); if (!curve) goto curve_alloc_fail; - axix_x = kzalloc(sizeof(*axix_x) * (ramp->num_entries + _EXTRA_POINTS), - GFP_KERNEL); + axix_x = kvzalloc(sizeof(*axix_x) * (ramp->num_entries + _EXTRA_POINTS), + GFP_KERNEL); if (!axix_x) goto axix_x_alloc_fail; - coeff = kzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL); + coeff = kvzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL); if (!coeff) goto coeff_alloc_fail; @@ -1246,13 +1246,13 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf, ret = true; - kfree(coeff); + kvfree(coeff); coeff_alloc_fail: - kfree(axix_x); + kvfree(axix_x); axix_x_alloc_fail: - kfree(curve); + kvfree(curve); curve_alloc_fail: - kfree(rgb_user); + kvfree(rgb_user); rgb_user_alloc_fail: return ret; @@ -1281,8 +1281,9 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, } ret = true; } else if (trans == TRANSFER_FUNCTION_PQ) { - rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + - _EXTRA_POINTS), GFP_KERNEL); + rgb_regamma = kvzalloc(sizeof(*rgb_regamma) * + (MAX_HW_POINTS + _EXTRA_POINTS), + GFP_KERNEL); if (!rgb_regamma) goto rgb_regamma_alloc_fail; 
points->end_exponent = 7; @@ -1302,11 +1303,12 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, } ret = true; - kfree(rgb_regamma); + kvfree(rgb_regamma); } else if (trans == TRANSFER_FUNCTION_SRGB || trans == TRANSFER_FUNCTION_BT709) { - rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + - _EXTRA_POINTS), GFP_KERNEL); + rgb_regamma = kvzalloc(sizeof(*rgb_regamma) * + (MAX_HW_POINTS + _EXTRA_POINTS), + GFP_KERNEL); if (!rgb_regamma) goto rgb_regamma_alloc_fail; points->end_exponent = 0; @@ -1324,7 +1326,7 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, } ret = true; - kfree(rgb_regamma); + kvfree(rgb_regamma); } rgb_regamma_alloc_fail: return ret; @@ -1348,8 +1350,9 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, } ret = true; } else if (trans == TRANSFER_FUNCTION_PQ) { - rgb_degamma = kzalloc(sizeof(*rgb_degamma) * (MAX_HW_POINTS + - _EXTRA_POINTS), GFP_KERNEL); + rgb_degamma = kvzalloc(sizeof(*rgb_degamma) * + (MAX_HW_POINTS + _EXTRA_POINTS), + GFP_KERNEL); if (!rgb_degamma) goto rgb_degamma_alloc_fail; @@ -1364,11 +1367,12 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, } ret = true; - kfree(rgb_degamma); + kvfree(rgb_degamma); } else if (trans == TRANSFER_FUNCTION_SRGB || trans == TRANSFER_FUNCTION_BT709) { - rgb_degamma = kzalloc(sizeof(*rgb_degamma) * (MAX_HW_POINTS + - _EXTRA_POINTS), GFP_KERNEL); + rgb_degamma = kvzalloc(sizeof(*rgb_degamma) * + (MAX_HW_POINTS + _EXTRA_POINTS), + GFP_KERNEL); if (!rgb_degamma) goto rgb_degamma_alloc_fail; @@ -1382,7 +1386,7 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, } ret = true; - kfree(rgb_degamma); + kvfree(rgb_degamma); } points->end_exponent = 0; points->x_point_at_y1_red = 1; diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index 0f5ad54d3fd3..de177ce8ca80 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -501,6 +501,32 @@ enum atom_cooling_solution_id{ LIQUID_COOLING = 0x01 }; +struct atom_firmware_info_v3_2 { + struct atom_common_table_header table_header; + uint32_t firmware_revision; + uint32_t bootup_sclk_in10khz; + uint32_t bootup_mclk_in10khz; + uint32_t firmware_capability; // enum atombios_firmware_capability + uint32_t main_call_parser_entry; /* direct address of main parser call in VBIOS binary. */ + uint32_t bios_scratch_reg_startaddr; // 1st bios scratch register dword address + uint16_t bootup_vddc_mv; + uint16_t bootup_vddci_mv; + uint16_t bootup_mvddc_mv; + uint16_t bootup_vddgfx_mv; + uint8_t mem_module_id; + uint8_t coolingsolution_id; /*0: Air cooling; 1: Liquid cooling ... 
*/ + uint8_t reserved1[2]; + uint32_t mc_baseaddr_high; + uint32_t mc_baseaddr_low; + uint8_t board_i2c_feature_id; // enum of atom_board_i2c_feature_id_def + uint8_t board_i2c_feature_gpio_id; // i2c id find in gpio_lut data table gpio_id + uint8_t board_i2c_feature_slave_addr; + uint8_t reserved3; + uint16_t bootup_mvddq_mv; + uint16_t bootup_mvpp_mv; + uint32_t zfbstartaddrin16mb; + uint32_t reserved2[3]; +}; /* *************************************************************************** @@ -1169,7 +1195,29 @@ struct atom_gfx_info_v2_2 uint32_t rlc_gpu_timer_refclk; }; - +struct atom_gfx_info_v2_3 { + struct atom_common_table_header table_header; + uint8_t gfxip_min_ver; + uint8_t gfxip_max_ver; + uint8_t max_shader_engines; + uint8_t max_tile_pipes; + uint8_t max_cu_per_sh; + uint8_t max_sh_per_se; + uint8_t max_backends_per_se; + uint8_t max_texture_channel_caches; + uint32_t regaddr_cp_dma_src_addr; + uint32_t regaddr_cp_dma_src_addr_hi; + uint32_t regaddr_cp_dma_dst_addr; + uint32_t regaddr_cp_dma_dst_addr_hi; + uint32_t regaddr_cp_dma_command; + uint32_t regaddr_cp_status; + uint32_t regaddr_rlc_gpu_clock_32; + uint32_t rlc_gpu_timer_refclk; + uint8_t active_cu_per_sh; + uint8_t active_rb_per_se; + uint16_t gcgoldenoffset; + uint32_t rm21_sram_vmin_value; +}; /* *************************************************************************** @@ -1198,6 +1246,76 @@ struct atom_smu_info_v3_1 uint8_t fw_ctf_polarity; // GPIO polarity for CTF }; +struct atom_smu_info_v3_2 { + struct atom_common_table_header table_header; + uint8_t smuip_min_ver; + uint8_t smuip_max_ver; + uint8_t smu_rsd1; + uint8_t gpuclk_ss_mode; + uint16_t sclk_ss_percentage; + uint16_t sclk_ss_rate_10hz; + uint16_t gpuclk_ss_percentage; // in unit of 0.001% + uint16_t gpuclk_ss_rate_10hz; + uint32_t core_refclk_10khz; + uint8_t ac_dc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for AC/DC switching, =0xff means invalid + uint8_t ac_dc_polarity; // GPIO polarity for AC/DC switching + uint8_t vr0hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR0 HOT event, =0xff means invalid + uint8_t vr0hot_polarity; // GPIO polarity for VR0 HOT event + uint8_t vr1hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR1 HOT event , =0xff means invalid + uint8_t vr1hot_polarity; // GPIO polarity for VR1 HOT event + uint8_t fw_ctf_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for CTF, =0xff means invalid + uint8_t fw_ctf_polarity; // GPIO polarity for CTF + uint8_t pcc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for PCC, =0xff means invalid + uint8_t pcc_gpio_polarity; // GPIO polarity for CTF + uint16_t smugoldenoffset; + uint32_t gpupll_vco_freq_10khz; + uint32_t bootup_smnclk_10khz; + uint32_t bootup_socclk_10khz; + uint32_t bootup_mp0clk_10khz; + uint32_t bootup_mp1clk_10khz; + uint32_t bootup_lclk_10khz; + uint32_t bootup_dcefclk_10khz; + uint32_t ctf_threshold_override_value; + uint32_t reserved[5]; +}; + +struct atom_smu_info_v3_3 { + struct atom_common_table_header table_header; + uint8_t smuip_min_ver; + uint8_t smuip_max_ver; + uint8_t smu_rsd1; + uint8_t gpuclk_ss_mode; + uint16_t sclk_ss_percentage; + uint16_t sclk_ss_rate_10hz; + uint16_t gpuclk_ss_percentage; // in unit of 0.001% + uint16_t gpuclk_ss_rate_10hz; + uint32_t core_refclk_10khz; + uint8_t ac_dc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for AC/DC switching, =0xff means invalid + uint8_t ac_dc_polarity; // GPIO polarity for AC/DC switching + uint8_t vr0hot_gpio_bit; // GPIO bit shift in 
SMU_GPIOPAD_A configured for VR0 HOT event, =0xff means invalid + uint8_t vr0hot_polarity; // GPIO polarity for VR0 HOT event + uint8_t vr1hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR1 HOT event , =0xff means invalid + uint8_t vr1hot_polarity; // GPIO polarity for VR1 HOT event + uint8_t fw_ctf_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for CTF, =0xff means invalid + uint8_t fw_ctf_polarity; // GPIO polarity for CTF + uint8_t pcc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for PCC, =0xff means invalid + uint8_t pcc_gpio_polarity; // GPIO polarity for CTF + uint16_t smugoldenoffset; + uint32_t gpupll_vco_freq_10khz; + uint32_t bootup_smnclk_10khz; + uint32_t bootup_socclk_10khz; + uint32_t bootup_mp0clk_10khz; + uint32_t bootup_mp1clk_10khz; + uint32_t bootup_lclk_10khz; + uint32_t bootup_dcefclk_10khz; + uint32_t ctf_threshold_override_value; + uint32_t syspll3_0_vco_freq_10khz; + uint32_t syspll3_1_vco_freq_10khz; + uint32_t bootup_fclk_10khz; + uint32_t bootup_waflclk_10khz; + uint32_t reserved[3]; +}; + /* *************************************************************************** Data Table smc_dpm_info structure @@ -1283,7 +1401,6 @@ struct atom_smc_dpm_info_v4_1 uint32_t boardreserved[10]; }; - /* *************************************************************************** Data Table asic_profiling_info structure @@ -1864,6 +1981,55 @@ enum atom_smu9_syspll0_clock_id SMU9_SYSPLL0_DISPCLK_ID = 11, // DISPCLK }; +enum atom_smu11_syspll_id { + SMU11_SYSPLL0_ID = 0, + SMU11_SYSPLL1_0_ID = 1, + SMU11_SYSPLL1_1_ID = 2, + SMU11_SYSPLL1_2_ID = 3, + SMU11_SYSPLL2_ID = 4, + SMU11_SYSPLL3_0_ID = 5, + SMU11_SYSPLL3_1_ID = 6, +}; + + +enum atom_smu11_syspll0_clock_id { + SMU11_SYSPLL0_SOCCLK_ID = 0, // SOCCLK + SMU11_SYSPLL0_MP0CLK_ID = 1, // MP0CLK + SMU11_SYSPLL0_DCLK_ID = 2, // DCLK + SMU11_SYSPLL0_VCLK_ID = 3, // VCLK + SMU11_SYSPLL0_ECLK_ID = 4, // ECLK + SMU11_SYSPLL0_DCEFCLK_ID = 5, // DCEFCLK +}; + + +enum atom_smu11_syspll1_0_clock_id { + SMU11_SYSPLL1_0_UCLKA_ID = 0, // UCLK_a +}; + +enum atom_smu11_syspll1_1_clock_id { + SMU11_SYSPLL1_0_UCLKB_ID = 0, // UCLK_b +}; + +enum atom_smu11_syspll1_2_clock_id { + SMU11_SYSPLL1_0_FCLK_ID = 0, // FCLK +}; + +enum atom_smu11_syspll2_clock_id { + SMU11_SYSPLL2_GFXCLK_ID = 0, // GFXCLK +}; + +enum atom_smu11_syspll3_0_clock_id { + SMU11_SYSPLL3_0_WAFCLK_ID = 0, // WAFCLK + SMU11_SYSPLL3_0_DISPCLK_ID = 1, // DISPCLK + SMU11_SYSPLL3_0_DPREFCLK_ID = 2, // DPREFCLK +}; + +enum atom_smu11_syspll3_1_clock_id { + SMU11_SYSPLL3_1_MP1CLK_ID = 0, // MP1CLK + SMU11_SYSPLL3_1_SMNCLK_ID = 1, // SMNCLK + SMU11_SYSPLL3_1_LCLK_ID = 2, // LCLK +}; + struct atom_get_smu_clock_info_output_parameters_v3_1 { union { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index add90675fd2a..18b5b2ff47fe 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -79,12 +79,13 @@ #define PCIE_BUS_CLK 10000 #define TCLK (PCIE_BUS_CLK / 10) -static const struct profile_mode_setting smu7_profiling[5] = +static const struct profile_mode_setting smu7_profiling[6] = {{1, 0, 100, 30, 1, 0, 100, 10}, {1, 10, 0, 30, 0, 0, 0, 0}, {0, 0, 0, 0, 1, 10, 16, 31}, {1, 0, 11, 50, 1, 0, 100, 10}, {1, 0, 5, 30, 0, 0, 0, 0}, + {0, 0, 0, 0, 0, 0, 0, 0}, }; /** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. 
*/ @@ -4743,23 +4744,27 @@ static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr) for (i=0; i < dep_table->count; i++) { if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; - break; + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK; + return; } } - if (i == dep_table->count) + if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; + } dep_table = table_info->vdd_dep_on_sclk; odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk); for (i=0; i < dep_table->count; i++) { if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; - break; + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK; + return; } } - if (i == dep_table->count) + if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; + } } static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, @@ -4860,6 +4865,17 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) len = sizeof(smu7_profiling) / sizeof(struct profile_mode_setting); for (i = 0; i < len; i++) { + if (i == hwmgr->power_profile_mode) { + size += sprintf(buf + size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n", + i, profile_name[i], "*", + data->current_profile_setting.sclk_up_hyst, + data->current_profile_setting.sclk_down_hyst, + data->current_profile_setting.sclk_activity, + data->current_profile_setting.mclk_up_hyst, + data->current_profile_setting.mclk_down_hyst, + data->current_profile_setting.mclk_activity); + continue; + } if (smu7_profiling[i].bupdate_sclk) size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ", i, profile_name[i], smu7_profiling[i].sclk_up_hyst, @@ -4879,24 +4895,6 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) "-", "-", "-"); } - size += sprintf(buf + size, "%3d %16s: %8d %16d %16d %16d %16d %16d\n", - i, profile_name[i], - data->custom_profile_setting.sclk_up_hyst, - data->custom_profile_setting.sclk_down_hyst, - data->custom_profile_setting.sclk_activity, - data->custom_profile_setting.mclk_up_hyst, - data->custom_profile_setting.mclk_down_hyst, - data->custom_profile_setting.mclk_activity); - - size += sprintf(buf + size, "%3s %16s: %8d %16d %16d %16d %16d %16d\n", - "*", "CURRENT", - data->current_profile_setting.sclk_up_hyst, - data->current_profile_setting.sclk_down_hyst, - data->current_profile_setting.sclk_activity, - data->current_profile_setting.mclk_up_hyst, - data->current_profile_setting.mclk_down_hyst, - data->current_profile_setting.mclk_activity); - return size; } @@ -4935,16 +4933,16 @@ static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint if (size < 8) return -EINVAL; - data->custom_profile_setting.bupdate_sclk = input[0]; - data->custom_profile_setting.sclk_up_hyst = input[1]; - data->custom_profile_setting.sclk_down_hyst = input[2]; - data->custom_profile_setting.sclk_activity = input[3]; - data->custom_profile_setting.bupdate_mclk = input[4]; - data->custom_profile_setting.mclk_up_hyst = input[5]; - data->custom_profile_setting.mclk_down_hyst = 
input[6]; - data->custom_profile_setting.mclk_activity = input[7]; - if (!smum_update_dpm_settings(hwmgr, &data->custom_profile_setting)) { - memcpy(&data->current_profile_setting, &data->custom_profile_setting, sizeof(struct profile_mode_setting)); + tmp.bupdate_sclk = input[0]; + tmp.sclk_up_hyst = input[1]; + tmp.sclk_down_hyst = input[2]; + tmp.sclk_activity = input[3]; + tmp.bupdate_mclk = input[4]; + tmp.mclk_up_hyst = input[5]; + tmp.mclk_down_hyst = input[6]; + tmp.mclk_activity = input[7]; + if (!smum_update_dpm_settings(hwmgr, &tmp)) { + memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting)); hwmgr->power_profile_mode = mode; } break; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h index f40179c9ca97..b8d0bb378595 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h @@ -325,7 +325,6 @@ struct smu7_hwmgr { uint16_t mem_latency_high; uint16_t mem_latency_low; uint32_t vr_config; - struct profile_mode_setting custom_profile_setting; struct profile_mode_setting current_profile_setting; }; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c index 03bc7453f3b1..d9e92e306535 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c @@ -852,12 +852,10 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - n = (n & 0xff) << 8; - if (data->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) return smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_PkgPwrSetLimit, n); + PPSMC_MSG_PkgPwrSetLimit, n<<8); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h index fb696e3d06cf..2f8a3b983cce 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h @@ -412,8 +412,10 @@ typedef struct { QuadraticInt_t ReservedEquation2; QuadraticInt_t ReservedEquation3; + uint16_t MinVoltageUlvGfx; + uint16_t MinVoltageUlvSoc; - uint32_t Reserved[15]; + uint32_t Reserved[14]; diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 3aa65bdecb0e..684ac626ac53 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -74,6 +74,7 @@ config DRM_SIL_SII8620 tristate "Silicon Image SII8620 HDMI/MHL bridge" depends on OF && RC_CORE select DRM_KMS_HELPER + imply EXTCON help Silicon Image SII8620 HDMI/MHL bridge chip driver. 
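The smu9_driver_if.h hunk above introduces MinVoltageUlvGfx and MinVoltageUlvSoc by carving them out of the reserved area, shrinking Reserved[15] to Reserved[14] so that the overall size of the firmware interface structure, and the offsets of every later member, stay unchanged. A stand-alone sketch of that invariant, with hypothetical structure and field names:

/*
 * Sketch: carving new fields out of a reserved block without changing
 * the structure's size. Names are hypothetical; the point is the
 * compile-time size check.
 */
#include <stdint.h>
#include <stdio.h>

struct fw_if_old {
	/* ... earlier members elided ... */
	uint32_t reserved[15];
};

struct fw_if_new {
	/* ... earlier members elided ... */
	uint16_t min_voltage_ulv_gfx;	/* takes half of old reserved[0] */
	uint16_t min_voltage_ulv_soc;	/* takes the other half */
	uint32_t reserved[14];		/* one dword fewer */
};

/* The blob exchanged with firmware must not change size. */
_Static_assert(sizeof(struct fw_if_old) == sizeof(struct fw_if_new),
	       "firmware interface size must stay constant");

int main(void)
{
	printf("old=%zu new=%zu bytes\n",
	       sizeof(struct fw_if_old), sizeof(struct fw_if_new));
	return 0;
}

Compiling this with any C11 compiler (e.g. gcc -std=c11) verifies the layout equivalence at build time; the same idea is expressed in-kernel with BUILD_BUG_ON.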
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c index 498d5948d1a8..9837c8d69e69 100644 --- a/drivers/gpu/drm/bridge/dumb-vga-dac.c +++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c @@ -56,7 +56,9 @@ static int dumb_vga_get_modes(struct drm_connector *connector) } drm_mode_connector_update_edid_property(connector, edid); - return drm_add_edid_modes(connector, edid); + ret = drm_add_edid_modes(connector, edid); + kfree(edid); + return ret; fallback: /* diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 7d25c42f22db..c825c76edc1d 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -155,6 +155,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) state->connectors[i].state); state->connectors[i].ptr = NULL; state->connectors[i].state = NULL; + state->connectors[i].old_state = NULL; + state->connectors[i].new_state = NULL; drm_connector_put(connector); } @@ -169,6 +171,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) state->crtcs[i].ptr = NULL; state->crtcs[i].state = NULL; + state->crtcs[i].old_state = NULL; + state->crtcs[i].new_state = NULL; } for (i = 0; i < config->num_total_plane; i++) { @@ -181,6 +185,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) state->planes[i].state); state->planes[i].ptr = NULL; state->planes[i].state = NULL; + state->planes[i].old_state = NULL; + state->planes[i].new_state = NULL; } for (i = 0; i < state->num_private_objs; i++) { @@ -190,6 +196,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) state->private_objs[i].state); state->private_objs[i].ptr = NULL; state->private_objs[i].state = NULL; + state->private_objs[i].old_state = NULL; + state->private_objs[i].new_state = NULL; } state->num_private_objs = 0; diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c index 02a50929af67..e7f4fe2848a5 100644 --- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c +++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c @@ -350,19 +350,44 @@ int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type, { uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE; ssize_t ret; + int retry; if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) return 0; - ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN, - &tmds_oen, sizeof(tmds_oen)); - if (ret) { - DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n", - enable ? "enable" : "disable"); - return ret; + /* + * LSPCON adapters in low-power state may ignore the first write, so + * read back and verify the written value a few times. + */ + for (retry = 0; retry < 3; retry++) { + uint8_t tmp; + + ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN, + &tmds_oen, sizeof(tmds_oen)); + if (ret) { + DRM_DEBUG_KMS("Failed to %s TMDS output buffers (%d attempts)\n", + enable ? "enable" : "disable", + retry + 1); + return ret; + } + + ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN, + &tmp, sizeof(tmp)); + if (ret) { + DRM_DEBUG_KMS("I2C read failed during TMDS output buffer %s (%d attempts)\n", + enable ? "enabling" : "disabling", + retry + 1); + return ret; + } + + if (tmp == tmds_oen) + return 0; } - return 0; + DRM_DEBUG_KMS("I2C write value mismatch during TMDS output buffer %s\n", + enable ? 
"enabling" : "disabling"); + + return -EIO; } EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 134069f36482..39f1db4acda4 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -4451,6 +4451,7 @@ drm_reset_display_info(struct drm_connector *connector) info->max_tmds_clock = 0; info->dvi_dual = false; info->has_hdmi_infoframe = false; + memset(&info->hdmi, 0, sizeof(info->hdmi)); info->non_desktop = 0; } @@ -4462,17 +4463,11 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi u32 quirks = edid_get_quirks(edid); + drm_reset_display_info(connector); + info->width_mm = edid->width_cm * 10; info->height_mm = edid->height_cm * 10; - /* driver figures it out in this case */ - info->bpc = 0; - info->color_formats = 0; - info->cea_rev = 0; - info->max_tmds_clock = 0; - info->dvi_dual = false; - info->has_hdmi_infoframe = false; - info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP); DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop); diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index e394799979a6..6d9b9453707c 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -212,6 +212,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor) return -ENOMEM; filp->private_data = priv; + filp->f_mode |= FMODE_UNSIGNED_OFFSET; priv->filp = filp; priv->pid = get_pid(task_pid(current)); priv->minor = minor; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index 0faaf829f5bf..f0e79178bde6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -18,6 +18,7 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> #include <uapi/drm/exynos_drm.h> #include "exynos_drm_drv.h" @@ -26,20 +27,6 @@ #include "exynos_drm_iommu.h" #include "exynos_drm_crtc.h" -#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb) - -/* - * exynos specific framebuffer structure. - * - * @fb: drm framebuffer obejct. - * @exynos_gem: array of exynos specific gem object containing a gem object. 
- */ -struct exynos_drm_fb { - struct drm_framebuffer fb; - struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER]; - dma_addr_t dma_addr[MAX_FB_BUFFER]; -}; - static int check_fb_gem_memory_type(struct drm_device *drm_dev, struct exynos_drm_gem *exynos_gem) { @@ -66,40 +53,9 @@ static int check_fb_gem_memory_type(struct drm_device *drm_dev, return 0; } -static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) -{ - struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); - unsigned int i; - - drm_framebuffer_cleanup(fb); - - for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem); i++) { - struct drm_gem_object *obj; - - if (exynos_fb->exynos_gem[i] == NULL) - continue; - - obj = &exynos_fb->exynos_gem[i]->base; - drm_gem_object_unreference_unlocked(obj); - } - - kfree(exynos_fb); - exynos_fb = NULL; -} - -static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb, - struct drm_file *file_priv, - unsigned int *handle) -{ - struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); - - return drm_gem_handle_create(file_priv, - &exynos_fb->exynos_gem[0]->base, handle); -} - static const struct drm_framebuffer_funcs exynos_drm_fb_funcs = { - .destroy = exynos_drm_fb_destroy, - .create_handle = exynos_drm_fb_create_handle, + .destroy = drm_gem_fb_destroy, + .create_handle = drm_gem_fb_create_handle, }; struct drm_framebuffer * @@ -108,12 +64,12 @@ exynos_drm_framebuffer_init(struct drm_device *dev, struct exynos_drm_gem **exynos_gem, int count) { - struct exynos_drm_fb *exynos_fb; + struct drm_framebuffer *fb; int i; int ret; - exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); - if (!exynos_fb) + fb = kzalloc(sizeof(*fb), GFP_KERNEL); + if (!fb) return ERR_PTR(-ENOMEM); for (i = 0; i < count; i++) { @@ -121,23 +77,21 @@ exynos_drm_framebuffer_init(struct drm_device *dev, if (ret < 0) goto err; - exynos_fb->exynos_gem[i] = exynos_gem[i]; - exynos_fb->dma_addr[i] = exynos_gem[i]->dma_addr - + mode_cmd->offsets[i]; + fb->obj[i] = &exynos_gem[i]->base; } - drm_helper_mode_fill_fb_struct(dev, &exynos_fb->fb, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); - ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs); + ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs); if (ret < 0) { DRM_ERROR("failed to initialize framebuffer\n"); goto err; } - return &exynos_fb->fb; + return fb; err: - kfree(exynos_fb); + kfree(fb); return ERR_PTR(ret); } @@ -191,12 +145,13 @@ err: dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index) { - struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); + struct exynos_drm_gem *exynos_gem; if (WARN_ON_ONCE(index >= MAX_FB_BUFFER)) return 0; - return exynos_fb->dma_addr[index]; + exynos_gem = to_exynos_gem(fb->obj[index]); + return exynos_gem->dma_addr + fb->offsets[index]; } static struct drm_mode_config_helper_funcs exynos_drm_mode_config_helpers = { diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index abd84cbcf1c2..09c4bc0b1859 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -954,8 +954,6 @@ static int hdmi_create_connector(struct drm_encoder *encoder) drm_mode_connector_attach_encoder(connector, encoder); if (hdata->bridge) { - encoder->bridge = hdata->bridge; - hdata->bridge->encoder = encoder; ret = drm_bridge_attach(encoder, hdata->bridge, NULL); if (ret) DRM_ERROR("Failed to attach bridge\n"); diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 257299ec95c4..272c79f5f5bf 100644 --- 
a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -473,7 +473,7 @@ static void vp_video_buffer(struct mixer_context *ctx, chroma_addr[1] = chroma_addr[0] + 0x40; } else { luma_addr[1] = luma_addr[0] + fb->pitches[0]; - chroma_addr[1] = chroma_addr[0] + fb->pitches[0]; + chroma_addr[1] = chroma_addr[0] + fb->pitches[1]; } } else { luma_addr[1] = 0; @@ -482,6 +482,7 @@ static void vp_video_buffer(struct mixer_context *ctx, spin_lock_irqsave(&ctx->reg_slock, flags); + vp_reg_write(ctx, VP_SHADOW_UPDATE, 1); /* interlace or progressive scan mode */ val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0); vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP); @@ -495,21 +496,23 @@ static void vp_video_buffer(struct mixer_context *ctx, vp_reg_write(ctx, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) | VP_IMG_VSIZE(fb->height)); /* chroma plane for NV12/NV21 is half the height of the luma plane */ - vp_reg_write(ctx, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) | + vp_reg_write(ctx, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[1]) | VP_IMG_VSIZE(fb->height / 2)); vp_reg_write(ctx, VP_SRC_WIDTH, state->src.w); - vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h); vp_reg_write(ctx, VP_SRC_H_POSITION, VP_SRC_H_POSITION_VAL(state->src.x)); - vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y); - vp_reg_write(ctx, VP_DST_WIDTH, state->crtc.w); vp_reg_write(ctx, VP_DST_H_POSITION, state->crtc.x); + if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) { + vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h / 2); + vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y / 2); vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h / 2); vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y / 2); } else { + vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h); + vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y); vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h); vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y); } @@ -699,6 +702,15 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg) /* interlace scan need to check shadow register */ if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) { + if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) && + vp_reg_read(ctx, VP_SHADOW_UPDATE)) + goto out; + + base = mixer_reg_read(ctx, MXR_CFG); + shadow = mixer_reg_read(ctx, MXR_CFG_S); + if (base != shadow) + goto out; + base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0)); shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0)); if (base != shadow) diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h index c311f571bdf9..189cfa2470a8 100644 --- a/drivers/gpu/drm/exynos/regs-mixer.h +++ b/drivers/gpu/drm/exynos/regs-mixer.h @@ -47,6 +47,7 @@ #define MXR_MO 0x0304 #define MXR_RESOLUTION 0x0310 +#define MXR_CFG_S 0x2004 #define MXR_GRAPHIC0_BASE_S 0x2024 #define MXR_GRAPHIC1_BASE_S 0x2044 diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index db6b94dda5df..d85939bd7b47 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -1080,6 +1080,7 @@ static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s) { set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt, s->workload->pending_events); + patch_value(s, cmd_ptr(s, 0), MI_NOOP); return 0; } diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index dd96ffc878ac..6d8180e8d1e2 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -169,6 +169,8 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = { 
static void emulate_monitor_status_change(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + int pipe; + vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT | SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT); @@ -267,6 +269,14 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) if (IS_BROADWELL(dev_priv)) vgpu_vreg_t(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK; + /* Disable Primary/Sprite/Cursor plane */ + for_each_pipe(dev_priv, pipe) { + vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE; + vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE; + vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~CURSOR_MODE; + vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= CURSOR_MODE_DISABLE; + } + vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE; } diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index b555eb26f9ce..6f4f8e941fc2 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -323,6 +323,7 @@ static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf, struct intel_vgpu_fb_info *fb_info) { gvt_dmabuf->drm_format = fb_info->drm_format; + gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod; gvt_dmabuf->width = fb_info->width; gvt_dmabuf->height = fb_info->height; gvt_dmabuf->stride = fb_info->stride; diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 6b50fe78dc1b..1c120683e958 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -245,16 +245,13 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, plane->hw_format = fmt; plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK; - if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) { - gvt_vgpu_err("invalid gma address: %lx\n", - (unsigned long)plane->base); + if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) return -EINVAL; - } plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { - gvt_vgpu_err("invalid gma address: %lx\n", - (unsigned long)plane->base); + gvt_vgpu_err("Translate primary plane gma 0x%x to gpa fail\n", + plane->base); return -EINVAL; } @@ -371,16 +368,13 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, alpha_plane, alpha_force); plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK; - if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) { - gvt_vgpu_err("invalid gma address: %lx\n", - (unsigned long)plane->base); + if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) return -EINVAL; - } plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { - gvt_vgpu_err("invalid gma address: %lx\n", - (unsigned long)plane->base); + gvt_vgpu_err("Translate cursor plane gma 0x%x to gpa fail\n", + plane->base); return -EINVAL; } @@ -476,16 +470,13 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu, plane->drm_format = drm_format; plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK; - if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) { - gvt_vgpu_err("invalid gma address: %lx\n", - (unsigned long)plane->base); + if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) return -EINVAL; - } plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { - gvt_vgpu_err("invalid gma address: %lx\n", - (unsigned long)plane->base); + 
gvt_vgpu_err("Translate sprite plane gma 0x%x to gpa fail\n", + plane->base); return -EINVAL; } diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index d29281231507..78e55aafc8bc 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -530,6 +530,16 @@ static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm, false, 0, mm->vgpu); } +static void ggtt_get_host_entry(struct intel_vgpu_mm *mm, + struct intel_gvt_gtt_entry *entry, unsigned long index) +{ + struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; + + GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); + + pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu); +} + static void ggtt_set_host_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *entry, unsigned long index) { @@ -1818,6 +1828,18 @@ int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off, return ret; } +static void ggtt_invalidate_pte(struct intel_vgpu *vgpu, + struct intel_gvt_gtt_entry *entry) +{ + struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; + unsigned long pfn; + + pfn = pte_ops->get_pfn(entry); + if (pfn != vgpu->gvt->gtt.scratch_mfn) + intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, + pfn << PAGE_SHIFT); +} + static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, void *p_data, unsigned int bytes) { @@ -1844,10 +1866,10 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data, bytes); - m = e; if (ops->test_present(&e)) { gfn = ops->get_pfn(&e); + m = e; /* one PTE update may be issued in multiple writes and the * first write may not construct a valid gfn @@ -1868,8 +1890,12 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, ops->set_pfn(&m, gvt->gtt.scratch_mfn); } else ops->set_pfn(&m, dma_addr >> PAGE_SHIFT); - } else + } else { + ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index); + ggtt_invalidate_pte(vgpu, &m); ops->set_pfn(&m, gvt->gtt.scratch_mfn); + ops->clear_present(&m); + } out: ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index); @@ -2030,7 +2056,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) return PTR_ERR(gtt->ggtt_mm); } - intel_vgpu_reset_ggtt(vgpu); + intel_vgpu_reset_ggtt(vgpu, false); return create_scratch_page_tree(vgpu); } @@ -2315,17 +2341,19 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu) /** * intel_vgpu_reset_ggtt - reset the GGTT entry * @vgpu: a vGPU + * @invalidate_old: invalidate old entries * * This function is called at the vGPU create stage * to reset all the GGTT entries. 
* */ -void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) +void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old) { struct intel_gvt *gvt = vgpu->gvt; struct drm_i915_private *dev_priv = gvt->dev_priv; struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE}; + struct intel_gvt_gtt_entry old_entry; u32 index; u32 num_entries; @@ -2334,13 +2362,23 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT; num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT; - while (num_entries--) + while (num_entries--) { + if (invalidate_old) { + ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index); + ggtt_invalidate_pte(vgpu, &old_entry); + } ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); + } index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT; num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT; - while (num_entries--) + while (num_entries--) { + if (invalidate_old) { + ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index); + ggtt_invalidate_pte(vgpu, &old_entry); + } ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); + } ggtt_invalidate(dev_priv); } @@ -2360,5 +2398,5 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu) * removing the shadow pages. */ intel_vgpu_destroy_all_ppgtt_mm(vgpu); - intel_vgpu_reset_ggtt(vgpu); + intel_vgpu_reset_ggtt(vgpu, true); } diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index a8b369cd352b..3792f2b7f4ff 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -193,7 +193,7 @@ struct intel_vgpu_gtt { extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu); extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu); -void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu); +void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old); void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu); extern int intel_gvt_init_gtt(struct intel_gvt *gvt); diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 8c5d5d005854..a33c1c3e4a21 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1150,6 +1150,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification) switch (notification) { case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE: root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY; + /* fall through */ case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE: mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps); return PTR_ERR_OR_ZERO(mm); diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index c16a492449d7..1466d8769ec9 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1301,7 +1301,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, } - return 0; + return -ENOTTY; } static ssize_t diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 84ca369f15a5..3b4daafebdcb 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1105,30 +1105,32 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) ret = i915_ggtt_probe_hw(dev_priv); if (ret) - return ret; + goto err_perf; - /* WARNING: Apparently we must kick fbdev drivers before vgacon, - * otherwise the vga fbdev driver falls over. */ + /* + * WARNING: Apparently we must kick fbdev drivers before vgacon, + * otherwise the vga fbdev driver falls over. 
+ */ ret = i915_kick_out_firmware_fb(dev_priv); if (ret) { DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); - goto out_ggtt; + goto err_ggtt; } ret = i915_kick_out_vgacon(dev_priv); if (ret) { DRM_ERROR("failed to remove conflicting VGA console\n"); - goto out_ggtt; + goto err_ggtt; } ret = i915_ggtt_init_hw(dev_priv); if (ret) - return ret; + goto err_ggtt; ret = i915_ggtt_enable_hw(dev_priv); if (ret) { DRM_ERROR("failed to enable GGTT\n"); - goto out_ggtt; + goto err_ggtt; } pci_set_master(pdev); @@ -1139,7 +1141,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) if (ret) { DRM_ERROR("failed to set DMA mask\n"); - goto out_ggtt; + goto err_ggtt; } } @@ -1157,7 +1159,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) if (ret) { DRM_ERROR("failed to set DMA mask\n"); - goto out_ggtt; + goto err_ggtt; } } @@ -1190,13 +1192,14 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) ret = intel_gvt_init(dev_priv); if (ret) - goto out_ggtt; + goto err_ggtt; return 0; -out_ggtt: +err_ggtt: i915_ggtt_cleanup_hw(dev_priv); - +err_perf: + i915_perf_fini(dev_priv); return ret; } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 8c170db8495d..0414228cd2b5 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -728,7 +728,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) err = radix_tree_insert(handles_vma, handle, vma); if (unlikely(err)) { - kfree(lut); + kmem_cache_free(eb->i915->luts, lut); goto err_obj; } diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index d8feb9053e0c..f0519e31543a 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -473,20 +473,37 @@ static u64 get_rc6(struct drm_i915_private *i915) spin_lock_irqsave(&i915->pmu.lock, flags); spin_lock(&kdev->power.lock); - if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) - i915->pmu.suspended_jiffies_last = - kdev->power.suspended_jiffies; + /* + * After the above branch intel_runtime_pm_get_if_in_use failed + * to get the runtime PM reference we cannot assume we are in + * runtime suspend since we can either: a) race with coming out + * of it before we took the power.lock, or b) there are other + * states than suspended which can bring us here. + * + * We need to double-check that we are indeed currently runtime + * suspended and if not we cannot do better than report the last + * known RC6 value. 
+ */ + if (kdev->power.runtime_status == RPM_SUSPENDED) { + if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) + i915->pmu.suspended_jiffies_last = + kdev->power.suspended_jiffies; - val = kdev->power.suspended_jiffies - - i915->pmu.suspended_jiffies_last; - val += jiffies - kdev->power.accounting_timestamp; + val = kdev->power.suspended_jiffies - + i915->pmu.suspended_jiffies_last; + val += jiffies - kdev->power.accounting_timestamp; - spin_unlock(&kdev->power.lock); + val = jiffies_to_nsecs(val); + val += i915->pmu.sample[__I915_SAMPLE_RC6].cur; - val = jiffies_to_nsecs(val); - val += i915->pmu.sample[__I915_SAMPLE_RC6].cur; - i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; + i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; + } else if (i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) { + val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur; + } else { + val = i915->pmu.sample[__I915_SAMPLE_RC6].cur; + } + spin_unlock(&kdev->power.lock); spin_unlock_irqrestore(&i915->pmu.lock, flags); } diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 709d6ca68074..3ea566f99450 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -729,7 +729,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev, struct drm_i915_private *dev_priv = kdev_to_i915(kdev); u32 tmp; - if (!IS_GEN9_BC(dev_priv)) + if (!IS_GEN9(dev_priv)) return; i915_audio_component_get_power(kdev); diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index c5c7530ba157..447b721c3be9 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -1256,7 +1256,6 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, return; aux_channel = child->aux_channel; - ddc_pin = child->ddc_pin; is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING; is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT; @@ -1303,9 +1302,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port)); if (is_dvi) { - info->alternate_ddc_pin = map_ddc_pin(dev_priv, ddc_pin); - - sanitize_ddc_pin(dev_priv, port); + ddc_pin = map_ddc_pin(dev_priv, child->ddc_pin); + if (intel_gmbus_is_valid_pin(dev_priv, ddc_pin)) { + info->alternate_ddc_pin = ddc_pin; + sanitize_ddc_pin(dev_priv, port); + } else { + DRM_DEBUG_KMS("Port %c has invalid DDC pin %d, " + "sticking to defaults\n", + port_name(port), ddc_pin); + } } if (is_dp) { diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index fc8b2c6e3508..704ddb4d3ca7 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c @@ -2140,10 +2140,22 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) } } - /* According to BSpec, "The CD clock frequency must be at least twice + /* + * According to BSpec, "The CD clock frequency must be at least twice * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default. + * + * FIXME: Check the actual, not default, BCLK being used. + * + * FIXME: This does not depend on ->has_audio because the higher CDCLK + * is required for audio probe, also when there are no audio capable + * displays connected at probe time. This leads to unnecessarily high + * CDCLK when audio is not required. + * + * FIXME: This limit is only applied when there are displays connected + * at probe time. 
If we probe without displays, we'll still end up using + * the platform minimum CDCLK, failing audio probe. */ - if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9) + if (INTEL_GEN(dev_priv) >= 9) min_cdclk = max(2 * 96000, min_cdclk); /* @@ -2290,9 +2302,44 @@ static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state) return 0; } +static int skl_dpll0_vco(struct intel_atomic_state *intel_state) +{ + struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev); + struct intel_crtc *crtc; + struct intel_crtc_state *crtc_state; + int vco, i; + + vco = intel_state->cdclk.logical.vco; + if (!vco) + vco = dev_priv->skl_preferred_vco_freq; + + for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { + if (!crtc_state->base.enable) + continue; + + if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) + continue; + + /* + * DPLL0 VCO may need to be adjusted to get the correct + * clock for eDP. This will affect cdclk as well. + */ + switch (crtc_state->port_clock / 2) { + case 108000: + case 216000: + vco = 8640000; + break; + default: + vco = 8100000; + break; + } + } + + return vco; +} + static int skl_modeset_calc_cdclk(struct drm_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->dev); struct intel_atomic_state *intel_state = to_intel_atomic_state(state); int min_cdclk, cdclk, vco; @@ -2300,9 +2347,7 @@ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state) if (min_cdclk < 0) return min_cdclk; - vco = intel_state->cdclk.logical.vco; - if (!vco) - vco = dev_priv->skl_preferred_vco_freq; + vco = skl_dpll0_vco(intel_state); /* * FIXME should also account for plane ratio diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 41e6c75a7f3c..f9550ea46c26 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -35,6 +35,7 @@ */ #define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin" +MODULE_FIRMWARE(I915_CSR_GLK); #define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4) #define I915_CSR_CNL "i915/cnl_dmc_ver1_07.bin" diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3b48fd2561fe..56004ffbd8bb 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -15178,6 +15178,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) memset(&crtc->base.mode, 0, sizeof(crtc->base.mode)); if (crtc_state->base.active) { intel_mode_from_pipe_config(&crtc->base.mode, crtc_state); + crtc->base.mode.hdisplay = crtc_state->pipe_src_w; + crtc->base.mode.vdisplay = crtc_state->pipe_src_h; intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state); WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode)); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 9a4a51e79fa1..b7b4cfdeb974 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1881,26 +1881,6 @@ found: reduce_m_n); } - /* - * DPLL0 VCO may need to be adjusted to get the correct - * clock for eDP. This will affect cdclk as well. 
- */ - if (intel_dp_is_edp(intel_dp) && IS_GEN9_BC(dev_priv)) { - int vco; - - switch (pipe_config->port_clock / 2) { - case 108000: - case 216000: - vco = 8640000; - break; - default: - vco = 8100000; - break; - } - - to_intel_atomic_state(pipe_config->base.state)->cdclk.logical.vco = vco; - } - if (!HAS_DDI(dev_priv)) intel_dp_set_clock(encoder, pipe_config); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index d4368589b355..a80fbad9be0f 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -49,12 +49,12 @@ * check the condition before the timeout. */ #define __wait_for(OP, COND, US, Wmin, Wmax) ({ \ - unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1; \ + const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \ long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \ int ret__; \ might_sleep(); \ for (;;) { \ - bool expired__ = time_after(jiffies, timeout__); \ + const bool expired__ = ktime_after(ktime_get_raw(), end__); \ OP; \ if (COND) { \ ret__ = 0; \ diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 6f12adc06365..6467a5cc2ca3 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -806,7 +806,7 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev) return; intel_fbdev_sync(ifbdev); - if (ifbdev->vma) + if (ifbdev->vma || ifbdev->helper.deferred_setup) drm_fb_helper_hotplug_event(&ifbdev->helper); } diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 697af5add78b..e3a5f673ff67 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -577,6 +577,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * know the next preemption status we see corresponds * to this ELSP update. */ + GEM_BUG_ON(!execlists_is_active(execlists, + EXECLISTS_ACTIVE_USER)); GEM_BUG_ON(!port_count(&port[0])); if (port_count(&port[0]) > 1) goto unlock; @@ -738,6 +740,8 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists) memset(port, 0, sizeof(*port)); port++; } + + execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER); } static void execlists_cancel_requests(struct intel_engine_cs *engine) @@ -1001,6 +1005,11 @@ static void execlists_submission_tasklet(unsigned long data) if (fw) intel_uncore_forcewake_put(dev_priv, execlists->fw_domains); + + /* If the engine is now idle, so should be the flag; and vice versa. 
*/ + GEM_BUG_ON(execlists_is_active(&engine->execlists, + EXECLISTS_ACTIVE_USER) == + !port_isset(engine->execlists.port)); } static void queue_request(struct intel_engine_cs *engine, diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index d35d2d50f595..8691c86f579c 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -326,7 +326,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder, I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON); POSTING_READ(lvds_encoder->reg); - if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 1000)) + + if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 5000)) DRM_ERROR("timed out waiting for panel to power on\n"); intel_panel_enable_backlight(pipe_config, conn_state); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 53ea564f971e..66de4b2dc8b7 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -641,19 +641,18 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv) DRM_DEBUG_KMS("Enabling DC6\n"); - gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); + /* Wa Display #1183: skl,kbl,cfl */ + if (IS_GEN9_BC(dev_priv)) + I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | + SKL_SELECT_ALTERNATE_DC_EXIT); + gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); } void skl_disable_dc6(struct drm_i915_private *dev_priv) { DRM_DEBUG_KMS("Disabling DC6\n"); - /* Wa Display #1183: skl,kbl,cfl */ - if (IS_GEN9_BC(dev_priv)) - I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | - SKL_SELECT_ALTERNATE_DC_EXIT); - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); } diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c index 6e5e1aa54ce1..b001699297c4 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c @@ -351,6 +351,7 @@ static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc, spin_lock_irqsave(&dev->event_lock, flags); mdp4_crtc->event = crtc->state->event; + crtc->state->event = NULL; spin_unlock_irqrestore(&dev->event_lock, flags); blend_setup(crtc); diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index 9893e43ba6c5..76b96081916f 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -708,6 +708,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc, spin_lock_irqsave(&dev->event_lock, flags); mdp5_crtc->event = crtc->state->event; + crtc->state->event = NULL; spin_unlock_irqrestore(&dev->event_lock, flags); /* diff --git a/drivers/gpu/drm/msm/disp/mdp_format.c b/drivers/gpu/drm/msm/disp/mdp_format.c index b4a8aa4490ee..005760bee708 100644 --- a/drivers/gpu/drm/msm/disp/mdp_format.c +++ b/drivers/gpu/drm/msm/disp/mdp_format.c @@ -171,7 +171,8 @@ uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats, return i; } -const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format) +const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, + uint64_t modifier) { int i; for (i = 0; i < ARRAY_SIZE(formats); i++) { diff --git a/drivers/gpu/drm/msm/disp/mdp_kms.h b/drivers/gpu/drm/msm/disp/mdp_kms.h index 1185487e7e5e..4fa8dbe4e165 100644 --- a/drivers/gpu/drm/msm/disp/mdp_kms.h +++ b/drivers/gpu/drm/msm/disp/mdp_kms.h @@ -98,7 +98,7 @@ struct mdp_format { #define 
MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv) uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only); -const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format); +const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, uint64_t modifier); /* MDP capabilities */ #define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */ diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 7a03a9489708..8baba30d6c65 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -173,6 +173,7 @@ struct msm_dsi_host { bool registered; bool power_on; + bool enabled; int irq; }; @@ -775,7 +776,7 @@ static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt( switch (mipi_fmt) { case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888; case MIPI_DSI_FMT_RGB666_PACKED: - case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666; + case MIPI_DSI_FMT_RGB666: return CMD_DST_FORMAT_RGB666; case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565; default: return CMD_DST_FORMAT_RGB888; } @@ -986,13 +987,19 @@ static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host) static void dsi_wait4video_done(struct msm_dsi_host *msm_host) { + u32 ret = 0; + struct device *dev = &msm_host->pdev->dev; + dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1); reinit_completion(&msm_host->video_comp); - wait_for_completion_timeout(&msm_host->video_comp, + ret = wait_for_completion_timeout(&msm_host->video_comp, msecs_to_jiffies(70)); + if (ret <= 0) + dev_err(dev, "wait for video done timed out\n"); + dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0); } @@ -1001,7 +1008,7 @@ static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host) if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO)) return; - if (msm_host->power_on) { + if (msm_host->power_on && msm_host->enabled) { dsi_wait4video_done(msm_host); /* delay 4 ms to skip BLLP */ usleep_range(2000, 4000); @@ -2203,7 +2210,7 @@ int msm_dsi_host_enable(struct mipi_dsi_host *host) * pm_runtime_put_autosuspend(&msm_host->pdev->dev); * } */ - + msm_host->enabled = true; return 0; } @@ -2211,6 +2218,7 @@ int msm_dsi_host_disable(struct mipi_dsi_host *host) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + msm_host->enabled = false; dsi_op_mode_config(msm_host, !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false); diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index 8e9d5c255820..9a9fa0c75a13 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -265,6 +265,115 @@ int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing, return 0; } +int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing, + struct msm_dsi_phy_clk_request *clk_req) +{ + const unsigned long bit_rate = clk_req->bitclk_rate; + const unsigned long esc_rate = clk_req->escclk_rate; + s32 ui, ui_x8, lpx; + s32 tmax, tmin; + s32 pcnt0 = 50; + s32 pcnt1 = 50; + s32 pcnt2 = 10; + s32 pcnt3 = 30; + s32 pcnt4 = 10; + s32 pcnt5 = 2; + s32 coeff = 1000; /* Precision, should avoid overflow */ + s32 hb_en, hb_en_ckln; + s32 temp; + + if (!bit_rate || !esc_rate) + return -EINVAL; + + timing->hs_halfbyte_en = 0; + hb_en = 0; + timing->hs_halfbyte_en_ckln = 0; + hb_en_ckln = 0; + + ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000); + ui_x8 = ui << 3; + lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000); + + temp = S_DIV_ROUND_UP(38 * coeff, ui_x8); + tmin = max_t(s32, temp, 0); 
+ temp = (95 * coeff) / ui_x8; + tmax = max_t(s32, temp, 0); + timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false); + + temp = 300 * coeff - (timing->clk_prepare << 3) * ui; + tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1; + tmax = (tmin > 255) ? 511 : 255; + timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false); + + tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8); + temp = 105 * coeff + 12 * ui - 20 * coeff; + tmax = (temp + 3 * ui) / ui_x8; + timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false); + + temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8); + tmin = max_t(s32, temp, 0); + temp = (85 * coeff + 6 * ui) / ui_x8; + tmax = max_t(s32, temp, 0); + timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false); + + temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui; + tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1; + tmax = 255; + timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false); + + tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1; + temp = 105 * coeff + 12 * ui - 20 * coeff; + tmax = (temp / ui_x8) - 1; + timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false); + + temp = 50 * coeff + ((hb_en << 2) - 8) * ui; + timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8); + + tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1; + tmax = 255; + timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false); + + temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui; + timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8); + + temp = 60 * coeff + 52 * ui - 43 * ui; + tmin = DIV_ROUND_UP(temp, ui_x8) - 1; + tmax = 63; + timing->shared_timings.clk_post = + linear_inter(tmax, tmin, pcnt2, 0, false); + + temp = 8 * ui + (timing->clk_prepare << 3) * ui; + temp += (((timing->clk_zero + 3) << 3) + 11) * ui; + temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) : + (((timing->hs_rqst_ckln << 3) + 8) * ui); + tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1; + tmax = 63; + if (tmin > tmax) { + temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false); + timing->shared_timings.clk_pre = temp >> 1; + timing->shared_timings.clk_pre_inc_by_2 = 1; + } else { + timing->shared_timings.clk_pre = + linear_inter(tmax, tmin, pcnt2, 0, false); + timing->shared_timings.clk_pre_inc_by_2 = 0; + } + + timing->ta_go = 3; + timing->ta_sure = 0; + timing->ta_get = 4; + + DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d", + timing->shared_timings.clk_pre, timing->shared_timings.clk_post, + timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero, + timing->clk_trail, timing->clk_prepare, timing->hs_exit, + timing->hs_zero, timing->hs_prepare, timing->hs_trail, + timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en, + timing->hs_halfbyte_en_ckln, timing->hs_prep_dly, + timing->hs_prep_dly_ckln); + + return 0; +} + void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg, u32 bit_mask) { diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h index c56268cbdb3d..a24ab80994a3 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -101,6 +101,8 @@ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing, struct msm_dsi_phy_clk_request *clk_req); int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing, struct msm_dsi_phy_clk_request *clk_req); +int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing, + struct msm_dsi_phy_clk_request *clk_req); void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg, u32 bit_mask); int msm_dsi_phy_init_common(struct 
msm_dsi_phy *phy); diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c index 0af951aaeea1..b3fffc8dbb2a 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c @@ -79,34 +79,6 @@ static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy) dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04); } -static int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing, - struct msm_dsi_phy_clk_request *clk_req) -{ - /* - * TODO: These params need to be computed, they're currently hardcoded - * for a 1440x2560@60Hz panel with a byteclk of 100.618 Mhz, and a - * default escape clock of 19.2 Mhz. - */ - - timing->hs_halfbyte_en = 0; - timing->clk_zero = 0x1c; - timing->clk_prepare = 0x07; - timing->clk_trail = 0x07; - timing->hs_exit = 0x23; - timing->hs_zero = 0x21; - timing->hs_prepare = 0x07; - timing->hs_trail = 0x07; - timing->hs_rqst = 0x05; - timing->ta_sure = 0x00; - timing->ta_go = 0x03; - timing->ta_get = 0x04; - - timing->shared_timings.clk_pre = 0x2d; - timing->shared_timings.clk_post = 0x0d; - - return 0; -} - static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, struct msm_dsi_phy_clk_request *clk_req) { diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index 0e0c87252ab0..7a16242bf8bf 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c @@ -183,7 +183,8 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format); vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format); - format = kms->funcs->get_format(kms, mode_cmd->pixel_format); + format = kms->funcs->get_format(kms, mode_cmd->pixel_format, + mode_cmd->modifier[0]); if (!format) { dev_err(dev->dev, "unsupported pixel format: %4.4s\n", (char *)&mode_cmd->pixel_format); diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index c178563fcd4d..456622b46335 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -92,8 +92,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, if (IS_ERR(fb)) { dev_err(dev->dev, "failed to allocate fb\n"); - ret = PTR_ERR(fb); - goto fail; + return PTR_ERR(fb); } bo = msm_framebuffer_bo(fb, 0); @@ -151,13 +150,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, fail_unlock: mutex_unlock(&dev->struct_mutex); -fail: - - if (ret) { - if (fb) - drm_framebuffer_remove(fb); - } - + drm_framebuffer_remove(fb); return ret; } diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 95196479f651..f583bb4222f9 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -132,17 +132,19 @@ static void put_pages(struct drm_gem_object *obj) struct msm_gem_object *msm_obj = to_msm_bo(obj); if (msm_obj->pages) { - /* For non-cached buffers, ensure the new pages are clean - * because display controller, GPU, etc. are not coherent: - */ - if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) - dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl, - msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + if (msm_obj->sgt) { + /* For non-cached buffers, ensure the new + * pages are clean because display controller, + * GPU, etc. 
are not coherent: + */ + if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) + dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl, + msm_obj->sgt->nents, + DMA_BIDIRECTIONAL); - if (msm_obj->sgt) sg_free_table(msm_obj->sgt); - - kfree(msm_obj->sgt); + kfree(msm_obj->sgt); + } if (use_pages(obj)) drm_gem_put_pages(obj, msm_obj->pages, true, false); diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index 17d5824417ad..aaa329dc020e 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -48,8 +48,11 @@ struct msm_kms_funcs { /* functions to wait for atomic commit completed on each CRTC */ void (*wait_for_crtc_commit_done)(struct msm_kms *kms, struct drm_crtc *crtc); + /* get msm_format w/ optional format modifiers from drm_mode_fb_cmd2 */ + const struct msm_format *(*get_format)(struct msm_kms *kms, + const uint32_t format, + const uint64_t modifiers); /* misc: */ - const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format); long (*round_pixclk)(struct msm_kms *kms, unsigned long rate, struct drm_encoder *encoder); int (*set_split_display)(struct msm_kms *kms, diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 6f402c4f2bdd..ab61c038f42c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -214,7 +214,6 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align, INIT_LIST_HEAD(&nvbo->entry); INIT_LIST_HEAD(&nvbo->vma_list); nvbo->bo.bdev = &drm->ttm.bdev; - nvbo->cli = cli; /* This is confusing, and doesn't actually mean we want an uncached * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h index be8e00b49cde..73c48440d4d7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.h +++ b/drivers/gpu/drm/nouveau/nouveau_bo.h @@ -26,8 +26,6 @@ struct nouveau_bo { struct list_head vma_list; - struct nouveau_cli *cli; - unsigned contig:1; unsigned page:5; unsigned kind:8; diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index dff51a0ee028..8c093ca4222e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -63,7 +63,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg) { struct nouveau_bo *nvbo = nouveau_bo(bo); - struct nouveau_drm *drm = nvbo->cli->drm; + struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct nouveau_mem *mem; int ret; @@ -103,7 +103,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg) { struct nouveau_bo *nvbo = nouveau_bo(bo); - struct nouveau_drm *drm = nvbo->cli->drm; + struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct nouveau_mem *mem; int ret; @@ -131,7 +131,7 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg) { struct nouveau_bo *nvbo = nouveau_bo(bo); - struct nouveau_drm *drm = nvbo->cli->drm; + struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct nouveau_mem *mem; int ret; diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 8bd739cfd00d..2b3ccd850750 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -3264,10 +3264,11 @@ nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr, drm_connector_unregister(&mstc->connector); - drm_modeset_lock_all(drm->dev); drm_fb_helper_remove_one_connector(&drm->fbcon->helper, 
&mstc->connector); + + drm_modeset_lock(&drm->dev->mode_config.connection_mutex, NULL); mstc->port = NULL; - drm_modeset_unlock_all(drm->dev); + drm_modeset_unlock(&drm->dev->mode_config.connection_mutex); drm_connector_unreference(&mstc->connector); } @@ -3277,9 +3278,7 @@ nv50_mstm_register_connector(struct drm_connector *connector) { struct nouveau_drm *drm = nouveau_drm(connector->dev); - drm_modeset_lock_all(drm->dev); drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector); - drm_modeset_unlock_all(drm->dev); drm_connector_register(connector); } diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c index 5e2e65e88847..7f3ac6b13b56 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc.c @@ -828,6 +828,12 @@ static void dispc_ovl_set_scale_coef(struct dispc_device *dispc, h_coef = dispc_ovl_get_scale_coef(fir_hinc, true); v_coef = dispc_ovl_get_scale_coef(fir_vinc, five_taps); + if (!h_coef || !v_coef) { + dev_err(&dispc->pdev->dev, "%s: failed to find scale coefs\n", + __func__); + return; + } + for (i = 0; i < 8; i++) { u32 h, hv; @@ -2342,7 +2348,7 @@ static int dispc_ovl_calc_scaling_24xx(struct dispc_device *dispc, } if (in_width > maxsinglelinewidth) { - DSSERR("Cannot scale max input width exceeded"); + DSSERR("Cannot scale max input width exceeded\n"); return -EINVAL; } return 0; @@ -2424,13 +2430,13 @@ again: } if (in_width > (maxsinglelinewidth * 2)) { - DSSERR("Cannot setup scaling"); - DSSERR("width exceeds maximum width possible"); + DSSERR("Cannot setup scaling\n"); + DSSERR("width exceeds maximum width possible\n"); return -EINVAL; } if (in_width > maxsinglelinewidth && *five_taps) { - DSSERR("cannot setup scaling with five taps"); + DSSERR("cannot setup scaling with five taps\n"); return -EINVAL; } return 0; @@ -2472,7 +2478,7 @@ static int dispc_ovl_calc_scaling_44xx(struct dispc_device *dispc, in_width > maxsinglelinewidth && ++*decim_x); if (in_width > maxsinglelinewidth) { - DSSERR("Cannot scale width exceeds max line width"); + DSSERR("Cannot scale width exceeds max line width\n"); return -EINVAL; } @@ -2490,7 +2496,7 @@ static int dispc_ovl_calc_scaling_44xx(struct dispc_device *dispc, * bandwidth. Despite what theory says this appears to * be true also for 16-bit color formats. 
*/ - DSSERR("Not enough bandwidth, too much downscaling (x-decimation factor %d > 4)", *decim_x); + DSSERR("Not enough bandwidth, too much downscaling (x-decimation factor %d > 4)\n", *decim_x); return -EINVAL; } @@ -4633,7 +4639,7 @@ static int dispc_errata_i734_wa_init(struct dispc_device *dispc) i734_buf.size, &i734_buf.paddr, GFP_KERNEL); if (!i734_buf.vaddr) { - dev_err(&dispc->pdev->dev, "%s: dma_alloc_writecombine failed", + dev_err(&dispc->pdev->dev, "%s: dma_alloc_writecombine failed\n", __func__); return -ENOMEM; } diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index 97c88861d67a..5879f45f6fc9 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -679,7 +679,7 @@ static int hdmi_audio_config(struct device *dev, struct omap_dss_audio *dss_audio) { struct omap_hdmi *hd = dev_get_drvdata(dev); - int ret; + int ret = 0; mutex_lock(&hd->lock); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c index 35ed2add6189..813ba42f2753 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c @@ -922,8 +922,13 @@ int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core) { const struct hdmi4_features *features; struct resource *res; + const struct soc_device_attribute *soc; - features = soc_device_match(hdmi4_soc_devices)->data; + soc = soc_device_match(hdmi4_soc_devices); + if (!soc) + return -ENODEV; + + features = soc->data; core->cts_swmode = features->cts_swmode; core->audio_use_mclk = features->audio_use_mclk; diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index d28da9ac3e90..ae1a001d1b83 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -671,7 +671,7 @@ static int hdmi_audio_config(struct device *dev, struct omap_dss_audio *dss_audio) { struct omap_hdmi *hd = dev_get_drvdata(dev); - int ret; + int ret = 0; mutex_lock(&hd->lock); diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index a0d7b1d905e8..5cde26ac937b 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -121,6 +121,9 @@ static int omap_connector_get_modes(struct drm_connector *connector) if (dssdrv->read_edid) { void *edid = kzalloc(MAX_EDID, GFP_KERNEL); + if (!edid) + return 0; + if ((dssdrv->read_edid(dssdev, edid, MAX_EDID) > 0) && drm_edid_is_valid(edid)) { drm_mode_connector_update_edid_property( @@ -139,6 +142,9 @@ static int omap_connector_get_modes(struct drm_connector *connector) struct drm_display_mode *mode = drm_mode_create(dev); struct videomode vm = {0}; + if (!mode) + return 0; + dssdrv->get_timings(dssdev, &vm); drm_display_mode_from_videomode(&vm, mode); @@ -200,6 +206,10 @@ static int omap_connector_mode_valid(struct drm_connector *connector, if (!r) { /* check if vrefresh is still valid */ new_mode = drm_mode_duplicate(dev, mode); + + if (!new_mode) + return MODE_BAD; + new_mode->clock = vm.pixelclock / 1000; new_mode->vrefresh = 0; if (mode->vrefresh == drm_mode_vrefresh(new_mode)) diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index f9fa1c90b35c..401c02e9e6b2 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c @@ -401,12 +401,16 @@ int tiler_unpin(struct tiler_block *block) struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, u16 w, u16 h, 
u16 align) { - struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL); + struct tiler_block *block; u32 min_align = 128; int ret; unsigned long flags; u32 slot_bytes; + block = kzalloc(sizeof(*block), GFP_KERNEL); + if (!block) + return ERR_PTR(-ENOMEM); + BUG_ON(!validfmt(fmt)); /* convert width/height to slots */ diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c index d7f7bc9f061a..817be3c41863 100644 --- a/drivers/gpu/drm/omapdrm/tcm-sita.c +++ b/drivers/gpu/drm/omapdrm/tcm-sita.c @@ -90,7 +90,7 @@ static int l2r_t2b(u16 w, u16 h, u16 a, s16 offset, { int i; unsigned long index; - bool area_free; + bool area_free = false; unsigned long slots_per_band = PAGE_SIZE / slot_bytes; unsigned long bit_offset = (offset > 0) ? offset / slot_bytes : 0; unsigned long curr_bit = bit_offset; diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c index c0fb52c6d4ca..01665b98c57e 100644 --- a/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/drivers/gpu/drm/qxl/qxl_cmd.c @@ -179,10 +179,9 @@ qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *relea uint32_t type, bool interruptible) { struct qxl_command cmd; - struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); cmd.type = type; - cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); + cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset); return qxl_ring_push(qdev->command_ring, &cmd, interruptible); } @@ -192,10 +191,9 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *releas uint32_t type, bool interruptible) { struct qxl_command cmd; - struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); cmd.type = type; - cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); + cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset); return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible); } diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 00a1a66b052a..864b456080c4 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -167,6 +167,7 @@ struct qxl_release { int id; int type; + struct qxl_bo *release_bo; uint32_t release_offset; uint32_t surface_release_id; struct ww_acquire_ctx ticket; diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index e238a1a2eca1..6cc9f3367fa0 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -182,9 +182,9 @@ static int qxl_process_single_command(struct qxl_device *qdev, goto out_free_reloc; /* TODO copy slow path code from i915 */ - fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE)); + fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK)); unwritten = __copy_from_user_inatomic_nocache - (fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), + (fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_MASK), u64_to_user_ptr(cmd->command), cmd->command_size); { diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index 5d84a66fed36..7cb214577275 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -173,6 +173,7 @@ qxl_release_free_list(struct qxl_release *release) list_del(&entry->tv.head); kfree(entry); } + release->release_bo = 
NULL; } void @@ -296,7 +297,6 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, { if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) { int idr_ret; - struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head); struct qxl_bo *bo; union qxl_release_info *info; @@ -304,8 +304,9 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); if (idr_ret < 0) return idr_ret; - bo = to_qxl_bo(entry->tv.bo); + bo = create_rel->release_bo; + (*release)->release_bo = bo; (*release)->release_offset = create_rel->release_offset + 64; qxl_release_list_add(*release, bo); @@ -365,6 +366,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]); + (*release)->release_bo = bo; (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx]; qdev->current_release_bo_offset[cur_idx]++; @@ -408,13 +410,12 @@ union qxl_release_info *qxl_release_map(struct qxl_device *qdev, { void *ptr; union qxl_release_info *info; - struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); - struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); + struct qxl_bo *bo = release->release_bo; - ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE); + ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK); if (!ptr) return NULL; - info = ptr + (release->release_offset & ~PAGE_SIZE); + info = ptr + (release->release_offset & ~PAGE_MASK); return info; } @@ -422,11 +423,10 @@ void qxl_release_unmap(struct qxl_device *qdev, struct qxl_release *release, union qxl_release_info *info) { - struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); - struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); + struct qxl_bo *bo = release->release_bo; void *ptr; - ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE); + ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK); qxl_bo_kunmap_atomic_page(qdev, bo, ptr); } diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c index bffff4c9fbf5..be3f14d7746d 100644 --- a/drivers/gpu/drm/sun4i/sun4i_lvds.c +++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c @@ -94,64 +94,9 @@ static void sun4i_lvds_encoder_disable(struct drm_encoder *encoder) } } -static enum drm_mode_status sun4i_lvds_encoder_mode_valid(struct drm_encoder *crtc, - const struct drm_display_mode *mode) -{ - struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(crtc); - struct sun4i_tcon *tcon = lvds->tcon; - u32 hsync = mode->hsync_end - mode->hsync_start; - u32 vsync = mode->vsync_end - mode->vsync_start; - unsigned long rate = mode->clock * 1000; - long rounded_rate; - - DRM_DEBUG_DRIVER("Validating modes...\n"); - - if (hsync < 1) - return MODE_HSYNC_NARROW; - - if (hsync > 0x3ff) - return MODE_HSYNC_WIDE; - - if ((mode->hdisplay < 1) || (mode->htotal < 1)) - return MODE_H_ILLEGAL; - - if ((mode->hdisplay > 0x7ff) || (mode->htotal > 0xfff)) - return MODE_BAD_HVALUE; - - DRM_DEBUG_DRIVER("Horizontal parameters OK\n"); - - if (vsync < 1) - return MODE_VSYNC_NARROW; - - if (vsync > 0x3ff) - return MODE_VSYNC_WIDE; - - if ((mode->vdisplay < 1) || (mode->vtotal < 1)) - return MODE_V_ILLEGAL; - - if ((mode->vdisplay > 0x7ff) || (mode->vtotal > 0xfff)) - return MODE_BAD_VVALUE; - - DRM_DEBUG_DRIVER("Vertical parameters OK\n"); - - tcon->dclk_min_div = 7; - tcon->dclk_max_div = 7; 
- rounded_rate = clk_round_rate(tcon->dclk, rate); - if (rounded_rate < rate) - return MODE_CLOCK_LOW; - - if (rounded_rate > rate) - return MODE_CLOCK_HIGH; - - DRM_DEBUG_DRIVER("Clock rate OK\n"); - - return MODE_OK; -} - static const struct drm_encoder_helper_funcs sun4i_lvds_enc_helper_funcs = { .disable = sun4i_lvds_encoder_disable, .enable = sun4i_lvds_encoder_enable, - .mode_valid = sun4i_lvds_encoder_mode_valid, }; static const struct drm_encoder_funcs sun4i_lvds_enc_funcs = { diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index f0481b7b60c5..06c94e3a5f15 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -910,7 +910,8 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags, while (npages >= HPAGE_PMD_NR) { gfp_t huge_flags = gfp_flags; - huge_flags |= GFP_TRANSHUGE; + huge_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY | + __GFP_KSWAPD_RECLAIM; huge_flags &= ~__GFP_MOVABLE; huge_flags &= ~__GFP_COMP; p = alloc_pages(huge_flags, HPAGE_PMD_ORDER); @@ -1027,11 +1028,15 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) GFP_USER | GFP_DMA32, "uc dma", 0); ttm_page_pool_init_locked(&_manager->wc_pool_huge, - GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP), + (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY | + __GFP_KSWAPD_RECLAIM) & + ~(__GFP_MOVABLE | __GFP_COMP), "wc huge", order); ttm_page_pool_init_locked(&_manager->uc_pool_huge, - GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP) + (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY | + __GFP_KSWAPD_RECLAIM) & + ~(__GFP_MOVABLE | __GFP_COMP) , "uc huge", order); _manager->options.max_size = max_pages; diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c index 8a25d1974385..f63d99c302e4 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c @@ -910,7 +910,8 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge) gfp_flags |= __GFP_ZERO; if (huge) { - gfp_flags |= GFP_TRANSHUGE; + gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY | + __GFP_KSWAPD_RECLAIM; gfp_flags &= ~__GFP_MOVABLE; gfp_flags &= ~__GFP_COMP; } diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index 2decc8e2c79f..add9cc97a3b6 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -195,6 +195,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo) vc4_bo_set_label(obj, -1); if (bo->validated_shader) { + kfree(bo->validated_shader->uniform_addr_offsets); kfree(bo->validated_shader->texture_samples); kfree(bo->validated_shader); bo->validated_shader = NULL; @@ -591,6 +592,7 @@ void vc4_free_object(struct drm_gem_object *gem_bo) } if (bo->validated_shader) { + kfree(bo->validated_shader->uniform_addr_offsets); kfree(bo->validated_shader->texture_samples); kfree(bo->validated_shader); bo->validated_shader = NULL; diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index bf4667481935..c61dff594195 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -760,6 +760,7 @@ static irqreturn_t vc4_crtc_irq_handler(int irq, void *data) struct vc4_async_flip_state { struct drm_crtc *crtc; struct drm_framebuffer *fb; + struct drm_framebuffer *old_fb; struct drm_pending_vblank_event *event; struct vc4_seqno_cb cb; @@ -789,6 +790,23 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb) drm_crtc_vblank_put(crtc); drm_framebuffer_put(flip_state->fb); + + /* Decrement the BO usecnt 
in order to keep the inc/dec calls balanced + * when the planes are updated through the async update path. + * FIXME: we should move to generic async-page-flip when it's + * available, so that we can get rid of this hand-made cleanup_fb() + * logic. + */ + if (flip_state->old_fb) { + struct drm_gem_cma_object *cma_bo; + struct vc4_bo *bo; + + cma_bo = drm_fb_cma_get_gem_obj(flip_state->old_fb, 0); + bo = to_vc4_bo(&cma_bo->base); + vc4_bo_dec_usecnt(bo); + drm_framebuffer_put(flip_state->old_fb); + } + kfree(flip_state); up(&vc4->async_modeset); @@ -813,9 +831,22 @@ static int vc4_async_page_flip(struct drm_crtc *crtc, struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0); struct vc4_bo *bo = to_vc4_bo(&cma_bo->base); + /* Increment the BO usecnt here, so that we never end up with an + * unbalanced number of vc4_bo_{dec,inc}_usecnt() calls when the + * plane is later updated through the non-async path. + * FIXME: we should move to generic async-page-flip when it's + * available, so that we can get rid of this hand-made prepare_fb() + * logic. + */ + ret = vc4_bo_inc_usecnt(bo); + if (ret) + return ret; + flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL); - if (!flip_state) + if (!flip_state) { + vc4_bo_dec_usecnt(bo); return -ENOMEM; + } drm_framebuffer_get(fb); flip_state->fb = fb; @@ -826,10 +857,23 @@ static int vc4_async_page_flip(struct drm_crtc *crtc, ret = down_interruptible(&vc4->async_modeset); if (ret) { drm_framebuffer_put(fb); + vc4_bo_dec_usecnt(bo); kfree(flip_state); return ret; } + /* Save the current FB before it's replaced by the new one in + * drm_atomic_set_fb_for_plane(). We'll need the old FB in + * vc4_async_page_flip_complete() to decrement the BO usecnt and keep + * it consistent. + * FIXME: we should move to generic async-page-flip when it's + * available, so that we can get rid of this hand-made cleanup_fb() + * logic. + */ + flip_state->old_fb = plane->state->fb; + if (flip_state->old_fb) + drm_framebuffer_get(flip_state->old_fb); + WARN_ON(drm_crtc_vblank_get(crtc) != 0); /* Immediately update the plane's legacy fb pointer, so that later diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c index 72c9dbd81d7f..f185812970da 100644 --- a/drivers/gpu/drm/vc4/vc4_dpi.c +++ b/drivers/gpu/drm/vc4/vc4_dpi.c @@ -96,7 +96,6 @@ struct vc4_dpi { struct platform_device *pdev; struct drm_encoder *encoder; - struct drm_connector *connector; void __iomem *regs; @@ -164,14 +163,31 @@ static void vc4_dpi_encoder_disable(struct drm_encoder *encoder) static void vc4_dpi_encoder_enable(struct drm_encoder *encoder) { + struct drm_device *dev = encoder->dev; struct drm_display_mode *mode = &encoder->crtc->mode; struct vc4_dpi_encoder *vc4_encoder = to_vc4_dpi_encoder(encoder); struct vc4_dpi *dpi = vc4_encoder->dpi; + struct drm_connector_list_iter conn_iter; + struct drm_connector *connector = NULL, *connector_scan; u32 dpi_c = DPI_ENABLE | DPI_OUTPUT_ENABLE_MODE; int ret; - if (dpi->connector->display_info.num_bus_formats) { - u32 bus_format = dpi->connector->display_info.bus_formats[0]; + /* Look up the connector attached to DPI so we can get the + * bus_format. Ideally the bridge would tell us the + * bus_format we want, but it doesn't yet, so assume that it's + * uniform throughout the bridge chain. 
+ */ + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector_scan, &conn_iter) { + if (connector_scan->encoder == encoder) { + connector = connector_scan; + break; + } + } + drm_connector_list_iter_end(&conn_iter); + + if (connector && connector->display_info.num_bus_formats) { + u32 bus_format = connector->display_info.bus_formats[0]; switch (bus_format) { case MEDIA_BUS_FMT_RGB888_1X24: @@ -199,6 +215,9 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder) DRM_ERROR("Unknown media bus format %d\n", bus_format); break; } + } else { + /* Default to 24bit if no connector found. */ + dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB, DPI_FORMAT); } if (mode->flags & DRM_MODE_FLAG_NHSYNC) diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index ce39390be389..13dcaad06798 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -503,7 +503,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane, * the scl fields here. */ if (num_planes == 1) { - scl0 = vc4_get_scl_field(state, 1); + scl0 = vc4_get_scl_field(state, 0); scl1 = scl0; } else { scl0 = vc4_get_scl_field(state, 1); diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c index d3f15bf60900..7cf82b071de2 100644 --- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c +++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c @@ -942,6 +942,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj) fail: kfree(validation_state.branch_targets); if (validated_shader) { + kfree(validated_shader->uniform_addr_offsets); kfree(validated_shader->texture_samples); kfree(validated_shader); } diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index 48e4f1df6e5d..020070d483d3 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -293,7 +293,7 @@ retry: ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC); if (ret == -ENOSPC) { spin_unlock(&vgdev->ctrlq.qlock); - wait_event(vgdev->ctrlq.ack_queue, vq->num_free); + wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt); spin_lock(&vgdev->ctrlq.qlock); goto retry; } else { @@ -368,7 +368,7 @@ retry: ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC); if (ret == -ENOSPC) { spin_unlock(&vgdev->cursorq.qlock); - wait_event(vgdev->cursorq.ack_queue, vq->num_free); + wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt); spin_lock(&vgdev->cursorq.qlock); goto retry; } else { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 2582ffd36bb5..ba0cdb743c3e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -441,11 +441,11 @@ static int vmwgfx_set_config_internal(struct drm_mode_set *set) struct drm_crtc *crtc = set->crtc; struct drm_framebuffer *fb; struct drm_crtc *tmp; - struct drm_modeset_acquire_ctx *ctx; struct drm_device *dev = set->crtc->dev; + struct drm_modeset_acquire_ctx ctx; int ret; - ctx = dev->mode_config.acquire_ctx; + drm_modeset_acquire_init(&ctx, 0); restart: /* @@ -458,7 +458,7 @@ restart: fb = set->fb; - ret = crtc->funcs->set_config(set, ctx); + ret = crtc->funcs->set_config(set, &ctx); if (ret == 0) { crtc->primary->crtc = crtc; crtc->primary->fb = fb; @@ -473,20 +473,13 @@ restart: } if (ret == -EDEADLK) { - dev->mode_config.acquire_ctx = NULL; - -retry_locking: - drm_modeset_backoff(ctx); - - ret = drm_modeset_lock_all_ctx(dev, ctx); - if (ret) - goto 
retry_locking; - - dev->mode_config.acquire_ctx = ctx; - + drm_modeset_backoff(&ctx); goto restart; } + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + return ret; } @@ -624,7 +617,6 @@ static int vmw_fb_set_par(struct fb_info *info) } mutex_lock(&par->bo_mutex); - drm_modeset_lock_all(vmw_priv->dev); ret = vmw_fb_kms_framebuffer(info); if (ret) goto out_unlock; @@ -657,7 +649,6 @@ out_unlock: drm_mode_destroy(vmw_priv->dev, old_mode); par->set_mode = mode; - drm_modeset_unlock_all(vmw_priv->dev); mutex_unlock(&par->bo_mutex); return ret; @@ -713,18 +704,14 @@ int vmw_fb_init(struct vmw_private *vmw_priv) par->max_width = fb_width; par->max_height = fb_height; - drm_modeset_lock_all(vmw_priv->dev); ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width, par->max_height, &par->con, &par->crtc, &init_mode); - if (ret) { - drm_modeset_unlock_all(vmw_priv->dev); + if (ret) goto err_kms; - } info->var.xres = init_mode->hdisplay; info->var.yres = init_mode->vdisplay; - drm_modeset_unlock_all(vmw_priv->dev); /* * Create buffers and alloc memory @@ -832,7 +819,9 @@ int vmw_fb_close(struct vmw_private *vmw_priv) cancel_delayed_work_sync(&par->local_work); unregister_framebuffer(info); + mutex_lock(&par->bo_mutex); (void) vmw_fb_kms_detach(par, true, true); + mutex_unlock(&par->bo_mutex); vfree(par->vmalloc); framebuffer_release(info); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index f11601b6fd74..96fd7a03d2f8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -2595,6 +2595,7 @@ void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx, vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf, out_fence, NULL); + vmw_dmabuf_unreference(&ctx->buf); vmw_resource_unreserve(res, false, NULL, 0); mutex_unlock(&res->dev_priv->cmdbuf_mutex); } @@ -2680,7 +2681,9 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, struct vmw_display_unit *du; struct drm_display_mode *mode; int i = 0; + int ret = 0; + mutex_lock(&dev_priv->dev->mode_config.mutex); list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list, head) { if (i == unit) @@ -2691,7 +2694,8 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, if (i != unit) { DRM_ERROR("Could not find initial display unit.\n"); - return -EINVAL; + ret = -EINVAL; + goto out_unlock; } if (list_empty(&con->modes)) @@ -2699,7 +2703,8 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, if (list_empty(&con->modes)) { DRM_ERROR("Could not find initial display mode.\n"); - return -EINVAL; + ret = -EINVAL; + goto out_unlock; } du = vmw_connector_to_du(con); @@ -2720,7 +2725,10 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, head); } - return 0; + out_unlock: + mutex_unlock(&dev_priv->dev->mode_config.mutex); + + return ret; } /** diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 60252fd796f6..0000434a1fbd 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -462,10 +462,11 @@ config HID_LENOVO select NEW_LEDS select LEDS_CLASS ---help--- - Support for Lenovo devices that are not fully compliant with HID standard. + Support for IBM/Lenovo devices that are not fully compliant with HID standard. 
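The vmwgfx_set_config_internal() hunk above switches from borrowing dev->mode_config.acquire_ctx to a locally owned acquire context, which is the usual DRM deadlock-backoff idiom: initialize a context, retry the locked operation whenever it returns -EDEADLK after calling drm_modeset_backoff(), then drop the locks and finalize the context. The sketch below is only an illustration of that loop under the same assumptions; modeset_with_local_ctx() and do_modeset() are made-up names, with do_modeset() standing in for the real locked operation (crtc->funcs->set_config(set, &ctx) in the hunk above).

#include <linux/errno.h>
#include <drm/drm_device.h>
#include <drm/drm_modeset_lock.h>

/* Hypothetical stand-in for the work done under the modeset locks,
 * e.g. crtc->funcs->set_config(set, &ctx) in the hunk above.
 */
static int do_modeset(struct drm_device *dev,
		      struct drm_modeset_acquire_ctx *ctx);

static int modeset_with_local_ctx(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = do_modeset(dev, &ctx);
	if (ret == -EDEADLK) {
		/* A contended lock was found: drop everything held by this
		 * context, wait for the contended lock, then retry the whole
		 * operation with the same context.
		 */
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

Owning the context locally, rather than reusing dev->mode_config.acquire_ctx as the removed code did, keeps the error path a simple drop/fini pair and avoids clobbering a caller's context.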
- Say Y if you want support for the non-compliant features of the Lenovo - Thinkpad standalone keyboards, e.g: + Say Y if you want support for horizontal scrolling of the IBM/Lenovo + Scrollpoint mice or the non-compliant features of the Lenovo Thinkpad + standalone keyboards, e.g: - ThinkPad USB Keyboard with TrackPoint (supports extra LEDs and trackpoint configuration) - ThinkPad Compact Bluetooth Keyboard with TrackPoint (supports Fn keys) diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 5a3a7ead3012..46f5ecd11bf7 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -525,6 +525,9 @@ #define I2C_VENDOR_ID_HANTICK 0x0911 #define I2C_PRODUCT_ID_HANTICK_5288 0x5288 +#define I2C_VENDOR_ID_RAYD 0x2386 +#define I2C_PRODUCT_ID_RAYD_3118 0x3118 + #define USB_VENDOR_ID_HANWANG 0x0b57 #define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000 #define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff @@ -549,6 +552,13 @@ #define USB_VENDOR_ID_HUION 0x256c #define USB_DEVICE_ID_HUION_TABLET 0x006e +#define USB_VENDOR_ID_IBM 0x04b3 +#define USB_DEVICE_ID_IBM_SCROLLPOINT_III 0x3100 +#define USB_DEVICE_ID_IBM_SCROLLPOINT_PRO 0x3103 +#define USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL 0x3105 +#define USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL 0x3108 +#define USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO 0x3109 + #define USB_VENDOR_ID_IDEACOM 0x1cb6 #define USB_DEVICE_ID_IDEACOM_IDC6650 0x6650 #define USB_DEVICE_ID_IDEACOM_IDC6651 0x6651 @@ -681,6 +691,7 @@ #define USB_DEVICE_ID_LENOVO_TPKBD 0x6009 #define USB_DEVICE_ID_LENOVO_CUSBKBD 0x6047 #define USB_DEVICE_ID_LENOVO_CBTKBD 0x6048 +#define USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL 0x6049 #define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067 #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085 #define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3 @@ -961,6 +972,7 @@ #define USB_DEVICE_ID_SIS817_TOUCH 0x0817 #define USB_DEVICE_ID_SIS_TS 0x1013 #define USB_DEVICE_ID_SIS1030_TOUCH 0x1030 +#define USB_DEVICE_ID_SIS10FB_TOUCH 0x10fb #define USB_VENDOR_ID_SKYCABLE 0x1223 #define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 6836a856c243..930652c25120 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -387,7 +387,8 @@ static int hidinput_get_battery_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_CAPACITY: - if (dev->battery_report_type == HID_FEATURE_REPORT) { + if (dev->battery_status != HID_BATTERY_REPORTED && + !dev->battery_avoid_query) { value = hidinput_query_battery_capacity(dev); if (value < 0) return value; @@ -403,17 +404,17 @@ static int hidinput_get_battery_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_STATUS: - if (!dev->battery_reported && - dev->battery_report_type == HID_FEATURE_REPORT) { + if (dev->battery_status != HID_BATTERY_REPORTED && + !dev->battery_avoid_query) { value = hidinput_query_battery_capacity(dev); if (value < 0) return value; dev->battery_capacity = value; - dev->battery_reported = true; + dev->battery_status = HID_BATTERY_QUERIED; } - if (!dev->battery_reported) + if (dev->battery_status == HID_BATTERY_UNKNOWN) val->intval = POWER_SUPPLY_STATUS_UNKNOWN; else if (dev->battery_capacity == 100) val->intval = POWER_SUPPLY_STATUS_FULL; @@ -486,6 +487,14 @@ static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type, dev->battery_report_type = report_type; dev->battery_report_id = field->report->id; + /* + * Stylus is normally not connected to the device and thus we + * can't query the device and get 
meaningful battery strength. + * We have to wait for the device to report it on its own. + */ + dev->battery_avoid_query = report_type == HID_INPUT_REPORT && + field->physical == HID_DG_STYLUS; + dev->battery = power_supply_register(&dev->dev, psy_desc, &psy_cfg); if (IS_ERR(dev->battery)) { error = PTR_ERR(dev->battery); @@ -530,9 +539,10 @@ static void hidinput_update_battery(struct hid_device *dev, int value) capacity = hidinput_scale_battery_capacity(dev, value); - if (!dev->battery_reported || capacity != dev->battery_capacity) { + if (dev->battery_status != HID_BATTERY_REPORTED || + capacity != dev->battery_capacity) { dev->battery_capacity = capacity; - dev->battery_reported = true; + dev->battery_status = HID_BATTERY_REPORTED; power_supply_changed(dev->battery); } } diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c index 1ac4ff4d57a6..643b6eb54442 100644 --- a/drivers/hid/hid-lenovo.c +++ b/drivers/hid/hid-lenovo.c @@ -6,6 +6,17 @@ * * Copyright (c) 2012 Bernhard Seibold * Copyright (c) 2014 Jamie Lentin <jm@lentin.co.uk> + * + * Linux IBM/Lenovo Scrollpoint mouse driver: + * - IBM Scrollpoint III + * - IBM Scrollpoint Pro + * - IBM Scrollpoint Optical + * - IBM Scrollpoint Optical 800dpi + * - IBM Scrollpoint Optical 800dpi Pro + * - Lenovo Scrollpoint Optical + * + * Copyright (c) 2012 Peter De Wachter <pdewacht@gmail.com> + * Copyright (c) 2018 Peter Ganzhorn <peter.ganzhorn@gmail.com> */ /* @@ -160,6 +171,17 @@ static int lenovo_input_mapping_cptkbd(struct hid_device *hdev, return 0; } +static int lenovo_input_mapping_scrollpoint(struct hid_device *hdev, + struct hid_input *hi, struct hid_field *field, + struct hid_usage *usage, unsigned long **bit, int *max) +{ + if (usage->hid == HID_GD_Z) { + hid_map_usage(hi, usage, bit, max, EV_REL, REL_HWHEEL); + return 1; + } + return 0; +} + static int lenovo_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) @@ -172,6 +194,14 @@ static int lenovo_input_mapping(struct hid_device *hdev, case USB_DEVICE_ID_LENOVO_CBTKBD: return lenovo_input_mapping_cptkbd(hdev, hi, field, usage, bit, max); + case USB_DEVICE_ID_IBM_SCROLLPOINT_III: + case USB_DEVICE_ID_IBM_SCROLLPOINT_PRO: + case USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL: + case USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL: + case USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO: + case USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL: + return lenovo_input_mapping_scrollpoint(hdev, hi, field, + usage, bit, max); default: return 0; } @@ -883,6 +913,12 @@ static const struct hid_device_id lenovo_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) }, { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) }, + { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_III) }, + { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_PRO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL) }, + { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL) }, + { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL) }, { } }; diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index fbfcc8009432..b39844adea47 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -192,6 +192,11 @@ static 
ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t int ret = 0, len; unsigned char report_number; + if (!hidraw_table[minor] || !hidraw_table[minor]->exist) { + ret = -ENODEV; + goto out; + } + dev = hidraw_table[minor]->hid; if (!dev->ll_driver->raw_request) { diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index 97689e98e53f..cc33622253aa 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -47,6 +47,7 @@ /* quirks to control the device */ #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0) #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1) +#define I2C_HID_QUIRK_RESEND_REPORT_DESCR BIT(2) /* flags */ #define I2C_HID_STARTED 0 @@ -171,6 +172,10 @@ static const struct i2c_hid_quirks { I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV }, { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288, I2C_HID_QUIRK_NO_IRQ_AFTER_RESET }, + { I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_3118, + I2C_HID_QUIRK_RESEND_REPORT_DESCR }, + { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH, + I2C_HID_QUIRK_RESEND_REPORT_DESCR }, { 0, 0 } }; @@ -1220,6 +1225,16 @@ static int i2c_hid_resume(struct device *dev) if (ret) return ret; + /* RAYDIUM device (2386:3118) need to re-send report descr cmd + * after resume, after this it will be back normal. + * otherwise it issues too many incomplete reports. + */ + if (ihid->quirks & I2C_HID_QUIRK_RESEND_REPORT_DESCR) { + ret = i2c_hid_command(client, &hid_report_descr_cmd, NULL, 0); + if (ret) + return ret; + } + if (hid->driver && hid->driver->reset_resume) { ret = hid->driver->reset_resume(hid); return ret; diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c index 157b44aacdff..acc2536c8094 100644 --- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c +++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c @@ -77,21 +77,21 @@ static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf, struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data; int curr_hid_dev = client_data->cur_hid_dev; - if (data_len < sizeof(struct hostif_msg_hdr)) { - dev_err(&client_data->cl_device->dev, - "[hid-ish]: error, received %u which is less than data header %u\n", - (unsigned int)data_len, - (unsigned int)sizeof(struct hostif_msg_hdr)); - ++client_data->bad_recv_cnt; - ish_hw_reset(hid_ishtp_cl->dev); - return; - } - payload = recv_buf + sizeof(struct hostif_msg_hdr); total_len = data_len; cur_pos = 0; do { + if (cur_pos + sizeof(struct hostif_msg) > total_len) { + dev_err(&client_data->cl_device->dev, + "[hid-ish]: error, received %u which is less than data header %u\n", + (unsigned int)data_len, + (unsigned int)sizeof(struct hostif_msg_hdr)); + ++client_data->bad_recv_cnt; + ish_hw_reset(hid_ishtp_cl->dev); + break; + } + recv_msg = (struct hostif_msg *)(recv_buf + cur_pos); payload_len = recv_msg->hdr.size; @@ -412,9 +412,7 @@ void hid_ishtp_get_report(struct hid_device *hid, int report_id, { struct ishtp_hid_data *hid_data = hid->driver_data; struct ishtp_cl_data *client_data = hid_data->client_data; - static unsigned char buf[10]; - unsigned int len; - struct hostif_msg_to_sensor *msg = (struct hostif_msg_to_sensor *)buf; + struct hostif_msg_to_sensor msg = {}; int rv; int i; @@ -426,14 +424,11 @@ void hid_ishtp_get_report(struct hid_device *hid, int report_id, return; } - len = sizeof(struct hostif_msg_to_sensor); - - memset(msg, 0, sizeof(struct hostif_msg_to_sensor)); - msg->hdr.command = (report_type == HID_FEATURE_REPORT) ? 
+ msg.hdr.command = (report_type == HID_FEATURE_REPORT) ? HOSTIF_GET_FEATURE_REPORT : HOSTIF_GET_INPUT_REPORT; for (i = 0; i < client_data->num_hid_devices; ++i) { if (hid == client_data->hid_sensor_hubs[i]) { - msg->hdr.device_id = + msg.hdr.device_id = client_data->hid_devices[i].dev_id; break; } @@ -442,8 +437,9 @@ void hid_ishtp_get_report(struct hid_device *hid, int report_id, if (i == client_data->num_hid_devices) return; - msg->report_id = report_id; - rv = ishtp_cl_send(client_data->hid_ishtp_cl, buf, len); + msg.report_id = report_id; + rv = ishtp_cl_send(client_data->hid_ishtp_cl, (uint8_t *)&msg, + sizeof(msg)); if (rv) hid_ishtp_trace(client_data, "%s hid %p send failed\n", __func__, hid); diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c index f272cdd9bd55..2623a567ffba 100644 --- a/drivers/hid/intel-ish-hid/ishtp/bus.c +++ b/drivers/hid/intel-ish-hid/ishtp/bus.c @@ -418,7 +418,7 @@ static struct ishtp_cl_device *ishtp_bus_add_device(struct ishtp_device *dev, list_del(&device->device_link); spin_unlock_irqrestore(&dev->device_list_lock, flags); dev_err(dev->devc, "Failed to register ISHTP client device\n"); - kfree(device); + put_device(&device->dev); return NULL; } diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index b54ef1ffcbec..ee7a37eb159a 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -1213,8 +1213,10 @@ static int __wacom_devm_sysfs_create_group(struct wacom *wacom, devres->root = root; error = sysfs_create_group(devres->root, group); - if (error) + if (error) { + devres_free(devres); return error; + } devres_add(&wacom->hdev->dev, devres); diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 6da16a879c9f..5f947ec20dcb 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -689,6 +689,45 @@ static int wacom_intuos_get_tool_type(int tool_id) return tool_type; } +static void wacom_exit_report(struct wacom_wac *wacom) +{ + struct input_dev *input = wacom->pen_input; + struct wacom_features *features = &wacom->features; + unsigned char *data = wacom->data; + int idx = (features->type == INTUOS) ? 
(data[1] & 0x01) : 0; + + /* + * Reset all states otherwise we lose the initial states + * when in-prox next time + */ + input_report_abs(input, ABS_X, 0); + input_report_abs(input, ABS_Y, 0); + input_report_abs(input, ABS_DISTANCE, 0); + input_report_abs(input, ABS_TILT_X, 0); + input_report_abs(input, ABS_TILT_Y, 0); + if (wacom->tool[idx] >= BTN_TOOL_MOUSE) { + input_report_key(input, BTN_LEFT, 0); + input_report_key(input, BTN_MIDDLE, 0); + input_report_key(input, BTN_RIGHT, 0); + input_report_key(input, BTN_SIDE, 0); + input_report_key(input, BTN_EXTRA, 0); + input_report_abs(input, ABS_THROTTLE, 0); + input_report_abs(input, ABS_RZ, 0); + } else { + input_report_abs(input, ABS_PRESSURE, 0); + input_report_key(input, BTN_STYLUS, 0); + input_report_key(input, BTN_STYLUS2, 0); + input_report_key(input, BTN_TOUCH, 0); + input_report_abs(input, ABS_WHEEL, 0); + if (features->type >= INTUOS3S) + input_report_abs(input, ABS_Z, 0); + } + input_report_key(input, wacom->tool[idx], 0); + input_report_abs(input, ABS_MISC, 0); /* reset tool id */ + input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]); + wacom->id[idx] = 0; +} + static int wacom_intuos_inout(struct wacom_wac *wacom) { struct wacom_features *features = &wacom->features; @@ -741,36 +780,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) if (!wacom->id[idx]) return 1; - /* - * Reset all states otherwise we lose the initial states - * when in-prox next time - */ - input_report_abs(input, ABS_X, 0); - input_report_abs(input, ABS_Y, 0); - input_report_abs(input, ABS_DISTANCE, 0); - input_report_abs(input, ABS_TILT_X, 0); - input_report_abs(input, ABS_TILT_Y, 0); - if (wacom->tool[idx] >= BTN_TOOL_MOUSE) { - input_report_key(input, BTN_LEFT, 0); - input_report_key(input, BTN_MIDDLE, 0); - input_report_key(input, BTN_RIGHT, 0); - input_report_key(input, BTN_SIDE, 0); - input_report_key(input, BTN_EXTRA, 0); - input_report_abs(input, ABS_THROTTLE, 0); - input_report_abs(input, ABS_RZ, 0); - } else { - input_report_abs(input, ABS_PRESSURE, 0); - input_report_key(input, BTN_STYLUS, 0); - input_report_key(input, BTN_STYLUS2, 0); - input_report_key(input, BTN_TOUCH, 0); - input_report_abs(input, ABS_WHEEL, 0); - if (features->type >= INTUOS3S) - input_report_abs(input, ABS_Z, 0); - } - input_report_key(input, wacom->tool[idx], 0); - input_report_abs(input, ABS_MISC, 0); /* reset tool id */ - input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]); - wacom->id[idx] = 0; + wacom_exit_report(wacom); return 2; } @@ -1235,6 +1245,12 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom) if (!valid) continue; + if (!prox) { + wacom->shared->stylus_in_proximity = false; + wacom_exit_report(wacom); + input_sync(pen_input); + return; + } if (range) { input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1])); input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3])); diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index 051a72eecb24..d2cc55e21374 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -40,6 +40,10 @@ static DEFINE_MUTEX(nb_smu_ind_mutex); #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 #endif +#ifndef PCI_DEVICE_ID_AMD_17H_RR_NB +#define PCI_DEVICE_ID_AMD_17H_RR_NB 0x15d0 +#endif + /* CPUID function 0x80000001, ebx */ #define CPUID_PKGTYPE_MASK 0xf0000000 #define CPUID_PKGTYPE_F 0x00000000 @@ -72,6 +76,7 @@ struct k10temp_data { struct pci_dev *pdev; void (*read_tempreg)(struct pci_dev *pdev, u32 *regval); int temp_offset; + u32 temp_adjust_mask; }; struct tctl_offset { 
@@ -84,6 +89,7 @@ static const struct tctl_offset tctl_offset_table[] = { { 0x17, "AMD Ryzen 5 1600X", 20000 }, { 0x17, "AMD Ryzen 7 1700X", 20000 }, { 0x17, "AMD Ryzen 7 1800X", 20000 }, + { 0x17, "AMD Ryzen 7 2700X", 10000 }, { 0x17, "AMD Ryzen Threadripper 1950X", 27000 }, { 0x17, "AMD Ryzen Threadripper 1920X", 27000 }, { 0x17, "AMD Ryzen Threadripper 1900X", 27000 }, @@ -129,6 +135,8 @@ static ssize_t temp1_input_show(struct device *dev, data->read_tempreg(data->pdev, ®val); temp = (regval >> 21) * 125; + if (regval & data->temp_adjust_mask) + temp -= 49000; if (temp > data->temp_offset) temp -= data->temp_offset; else @@ -259,12 +267,14 @@ static int k10temp_probe(struct pci_dev *pdev, data->pdev = pdev; if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 || - boot_cpu_data.x86_model == 0x70)) + boot_cpu_data.x86_model == 0x70)) { data->read_tempreg = read_tempreg_nb_f15; - else if (boot_cpu_data.x86 == 0x17) + } else if (boot_cpu_data.x86 == 0x17) { + data->temp_adjust_mask = 0x80000; data->read_tempreg = read_tempreg_nb_f17; - else + } else { data->read_tempreg = read_tempreg_pci; + } for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) { const struct tctl_offset *entry = &tctl_offset_table[i]; @@ -292,6 +302,7 @@ static const struct pci_device_id k10temp_id_table[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_RR_NB) }, {} }; MODULE_DEVICE_TABLE(pci, k10temp_id_table); diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c index 8b0bc4fc06e8..b0bc77bf2cd9 100644 --- a/drivers/hwmon/nct6683.c +++ b/drivers/hwmon/nct6683.c @@ -1380,8 +1380,8 @@ static int __init nct6683_find(int sioaddr, struct nct6683_sio_data *sio_data) /* Activate logical device if needed */ val = superio_inb(sioaddr, SIO_REG_ENABLE); if (!(val & 0x01)) { - pr_err("EC is disabled\n"); - goto fail; + pr_warn("Forcibly enabling EC access. Data may be unusable.\n"); + superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01); } superio_exit(sioaddr); diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c index 363bf56eb0f2..91976b6ca300 100644 --- a/drivers/hwmon/scmi-hwmon.c +++ b/drivers/hwmon/scmi-hwmon.c @@ -170,7 +170,10 @@ static int scmi_hwmon_probe(struct scmi_device *sdev) scmi_chip_info.info = ptr_scmi_ci; chip_info = &scmi_chip_info; - for (type = 0; type < hwmon_max && nr_count[type]; type++) { + for (type = 0; type < hwmon_max; type++) { + if (!nr_count[type]) + continue; + scmi_hwmon_add_chan_info(scmi_hwmon_chan, dev, nr_count[type], type, hwmon_attributes[type]); *ptr_scmi_ci++ = scmi_hwmon_chan++; diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index c4865b08d7fb..8d21b9825d71 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -707,7 +707,6 @@ config I2C_MPC config I2C_MT65XX tristate "MediaTek I2C adapter" depends on ARCH_MEDIATEK || COMPILE_TEST - depends on HAS_DMA help This selects the MediaTek(R) Integrated Inter Circuit bus driver for MT65xx and MT81xx. 
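The k10temp hunks above derive the reported temperature from the Family 17h register: bits 31:21 hold the raw reading in 0.125 °C steps, the bit selected by the new temp_adjust_mask (0x80000) marks readings that must be shifted down by 49 °C, and a per-model Tctl offset (10000 m°C for the newly added Ryzen 7 2700X entry) is subtracted afterwards. A small stand-alone sketch of that arithmetic follows; tctl_to_millicelsius() and the example register value are illustrative only, not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the millidegree calculation in the k10temp hunk above.
 * regval: raw SMU temperature register; temp_offset: per-model Tctl
 * offset in millidegrees Celsius (e.g. 10000 for a Ryzen 7 2700X).
 */
static int tctl_to_millicelsius(uint32_t regval, int temp_offset)
{
	int temp = (regval >> 21) * 125;	/* 0.125 degC per LSB */

	if (regval & 0x80000)			/* bit masked by temp_adjust_mask */
		temp -= 49000;

	if (temp > temp_offset)
		temp -= temp_offset;
	else
		temp = 0;

	return temp;
}

int main(void)
{
	/* 0x5f080000: raw reading of 95 degC with the 49 degC adjust bit set,
	 * minus a 10 degC Tctl offset -> prints 36000 millidegrees.
	 */
	printf("%d\n", tctl_to_millicelsius(0x5f080000, 10000));
	return 0;
}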
@@ -885,7 +884,6 @@ config I2C_SH7760 config I2C_SH_MOBILE tristate "SuperH Mobile I2C Controller" - depends on HAS_DMA depends on ARCH_SHMOBILE || ARCH_RENESAS || COMPILE_TEST help If you say yes to this option, support will be included for the @@ -1098,7 +1096,6 @@ config I2C_XLP9XX config I2C_RCAR tristate "Renesas R-Car I2C Controller" - depends on HAS_DMA depends on ARCH_RENESAS || COMPILE_TEST select I2C_SLAVE help diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c index 25fcc3c1e32b..4053259bccb8 100644 --- a/drivers/i2c/busses/i2c-sprd.c +++ b/drivers/i2c/busses/i2c-sprd.c @@ -86,6 +86,7 @@ struct sprd_i2c { u32 count; int irq; int err; + bool is_suspended; }; static void sprd_i2c_set_count(struct sprd_i2c *i2c_dev, u32 count) @@ -283,6 +284,9 @@ static int sprd_i2c_master_xfer(struct i2c_adapter *i2c_adap, struct sprd_i2c *i2c_dev = i2c_adap->algo_data; int im, ret; + if (i2c_dev->is_suspended) + return -EBUSY; + ret = pm_runtime_get_sync(i2c_dev->dev); if (ret < 0) return ret; @@ -364,13 +368,12 @@ static irqreturn_t sprd_i2c_isr_thread(int irq, void *dev_id) struct sprd_i2c *i2c_dev = dev_id; struct i2c_msg *msg = i2c_dev->msg; bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK); - u32 i2c_count = readl(i2c_dev->base + I2C_COUNT); u32 i2c_tran; if (msg->flags & I2C_M_RD) i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD; else - i2c_tran = i2c_count; + i2c_tran = i2c_dev->count; /* * If we got one ACK from slave when writing data, and we did not @@ -408,14 +411,13 @@ static irqreturn_t sprd_i2c_isr(int irq, void *dev_id) { struct sprd_i2c *i2c_dev = dev_id; struct i2c_msg *msg = i2c_dev->msg; - u32 i2c_count = readl(i2c_dev->base + I2C_COUNT); bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK); u32 i2c_tran; if (msg->flags & I2C_M_RD) i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD; else - i2c_tran = i2c_count; + i2c_tran = i2c_dev->count; /* * If we did not get one ACK from slave when writing data, then we @@ -586,11 +588,23 @@ static int sprd_i2c_remove(struct platform_device *pdev) static int __maybe_unused sprd_i2c_suspend_noirq(struct device *pdev) { + struct sprd_i2c *i2c_dev = dev_get_drvdata(pdev); + + i2c_lock_adapter(&i2c_dev->adap); + i2c_dev->is_suspended = true; + i2c_unlock_adapter(&i2c_dev->adap); + return pm_runtime_force_suspend(pdev); } static int __maybe_unused sprd_i2c_resume_noirq(struct device *pdev) { + struct sprd_i2c *i2c_dev = dev_get_drvdata(pdev); + + i2c_lock_adapter(&i2c_dev->adap); + i2c_dev->is_suspended = false; + i2c_unlock_adapter(&i2c_dev->adap); + return pm_runtime_force_resume(pdev); } diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index 036a03f0d0a6..1667b6e7674f 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c @@ -280,7 +280,7 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client, */ if (msgs[i].flags & I2C_M_RECV_LEN) { if (!(msgs[i].flags & I2C_M_RD) || - msgs[i].buf[0] < 1 || + msgs[i].len < 1 || msgs[i].buf[0] < 1 || msgs[i].len < msgs[i].buf[0] + I2C_SMBUS_BLOCK_MAX) { res = -EINVAL; diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index ee270e065ba9..2a972ed6851b 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig @@ -61,9 +61,12 @@ config INFINIBAND_ON_DEMAND_PAGING pages on demand instead. config INFINIBAND_ADDR_TRANS - bool + bool "RDMA/CM" depends on INFINIBAND default y + ---help--- + Support for RDMA communication manager (CM). + This allows for a generic connection abstraction over RDMA. 
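The i2c-sprd hunks above close a suspend race by flipping an is_suspended flag under the adapter lock: suspend_noirq() takes i2c_lock_adapter(), so it waits for any transfer that is already running, and once the flag is set new transfers fail fast with -EBUSY instead of touching a suspended controller. A compressed sketch of that pattern is below; my_i2c, my_i2c_master_xfer() and my_i2c_suspend_noirq() are hypothetical names, not the driver's, and the transfer body is elided.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/i2c.h>

/* Hypothetical driver state; only the fields the sketch needs. */
struct my_i2c {
	struct i2c_adapter adap;
	bool is_suspended;
};

static int my_i2c_master_xfer(struct i2c_adapter *adap,
			      struct i2c_msg *msgs, int num)
{
	struct my_i2c *i2c = container_of(adap, struct my_i2c, adap);

	if (i2c->is_suspended)
		return -EBUSY;		/* controller may be powered down */

	/* ... program the controller and run the transfer ... */
	return num;
}

static int __maybe_unused my_i2c_suspend_noirq(struct device *dev)
{
	struct my_i2c *i2c = dev_get_drvdata(dev);

	/* Taking the adapter lock serializes against a transfer that is
	 * already in flight; once the flag is set, later transfers bail out.
	 */
	i2c_lock_adapter(&i2c->adap);
	i2c->is_suspended = true;
	i2c_unlock_adapter(&i2c->adap);

	return 0;
}

Resume does the mirror image, clearing the flag under the same lock before pm_runtime_force_resume(), as in the hunk above.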
config INFINIBAND_ADDR_TRANS_CONFIGFS bool diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index e337b08de2ff..fb2d347f760f 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -291,14 +291,18 @@ static int find_gid(struct ib_gid_table *table, const union ib_gid *gid, * so lookup free slot only if requested. */ if (pempty && empty < 0) { - if (data->props & GID_TABLE_ENTRY_INVALID) { - /* Found an invalid (free) entry; allocate it */ - if (data->props & GID_TABLE_ENTRY_DEFAULT) { - if (default_gid) - empty = curr_index; - } else { - empty = curr_index; - } + if (data->props & GID_TABLE_ENTRY_INVALID && + (default_gid == + !!(data->props & GID_TABLE_ENTRY_DEFAULT))) { + /* + * Found an invalid (free) entry; allocate it. + * If default GID is requested, then our + * found slot must be one of the DEFAULT + * reserved slots or we fail. + * This ensures that only DEFAULT reserved + * slots are used for default property GIDs. + */ + empty = curr_index; } } @@ -420,8 +424,10 @@ int ib_cache_gid_add(struct ib_device *ib_dev, u8 port, return ret; } -int ib_cache_gid_del(struct ib_device *ib_dev, u8 port, - union ib_gid *gid, struct ib_gid_attr *attr) +static int +_ib_cache_gid_del(struct ib_device *ib_dev, u8 port, + union ib_gid *gid, struct ib_gid_attr *attr, + unsigned long mask, bool default_gid) { struct ib_gid_table *table; int ret = 0; @@ -431,11 +437,7 @@ int ib_cache_gid_del(struct ib_device *ib_dev, u8 port, mutex_lock(&table->lock); - ix = find_gid(table, gid, attr, false, - GID_ATTR_FIND_MASK_GID | - GID_ATTR_FIND_MASK_GID_TYPE | - GID_ATTR_FIND_MASK_NETDEV, - NULL); + ix = find_gid(table, gid, attr, default_gid, mask, NULL); if (ix < 0) { ret = -EINVAL; goto out_unlock; @@ -452,6 +454,17 @@ out_unlock: return ret; } +int ib_cache_gid_del(struct ib_device *ib_dev, u8 port, + union ib_gid *gid, struct ib_gid_attr *attr) +{ + unsigned long mask = GID_ATTR_FIND_MASK_GID | + GID_ATTR_FIND_MASK_GID_TYPE | + GID_ATTR_FIND_MASK_DEFAULT | + GID_ATTR_FIND_MASK_NETDEV; + + return _ib_cache_gid_del(ib_dev, port, gid, attr, mask, false); +} + int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port, struct net_device *ndev) { @@ -728,7 +741,7 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port, unsigned long gid_type_mask, enum ib_cache_gid_default_mode mode) { - union ib_gid gid; + union ib_gid gid = { }; struct ib_gid_attr gid_attr; struct ib_gid_table *table; unsigned int gid_type; @@ -736,7 +749,9 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port, table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid; - make_default_gid(ndev, &gid); + mask = GID_ATTR_FIND_MASK_GID_TYPE | + GID_ATTR_FIND_MASK_DEFAULT | + GID_ATTR_FIND_MASK_NETDEV; memset(&gid_attr, 0, sizeof(gid_attr)); gid_attr.ndev = ndev; @@ -747,12 +762,12 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port, gid_attr.gid_type = gid_type; if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) { - mask = GID_ATTR_FIND_MASK_GID_TYPE | - GID_ATTR_FIND_MASK_DEFAULT; + make_default_gid(ndev, &gid); __ib_cache_gid_add(ib_dev, port, &gid, &gid_attr, mask, true); } else if (mode == IB_CACHE_GID_DEFAULT_MODE_DELETE) { - ib_cache_gid_del(ib_dev, port, &gid, &gid_attr); + _ib_cache_gid_del(ib_dev, port, &gid, + &gid_attr, mask, true); } } } diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 51a641002e10..a693fcd4c513 100644 --- a/drivers/infiniband/core/cma.c +++ 
b/drivers/infiniband/core/cma.c @@ -382,6 +382,8 @@ struct cma_hdr { #define CMA_VERSION 0x00 struct cma_req_info { + struct sockaddr_storage listen_addr_storage; + struct sockaddr_storage src_addr_storage; struct ib_device *device; int port; union ib_gid local_gid; @@ -866,7 +868,6 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv, { struct ib_qp_attr qp_attr; int qp_attr_mask, ret; - union ib_gid sgid; mutex_lock(&id_priv->qp_mutex); if (!id_priv->id.qp) { @@ -889,12 +890,6 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv, if (ret) goto out; - ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num, - rdma_ah_read_grh(&qp_attr.ah_attr)->sgid_index, - &sgid, NULL); - if (ret) - goto out; - BUG_ON(id_priv->cma_dev->device != id_priv->id.device); if (conn_param) @@ -1340,11 +1335,11 @@ static bool validate_net_dev(struct net_device *net_dev, } static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event, - const struct cma_req_info *req) + struct cma_req_info *req) { - struct sockaddr_storage listen_addr_storage, src_addr_storage; - struct sockaddr *listen_addr = (struct sockaddr *)&listen_addr_storage, - *src_addr = (struct sockaddr *)&src_addr_storage; + struct sockaddr *listen_addr = + (struct sockaddr *)&req->listen_addr_storage; + struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage; struct net_device *net_dev; const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL; int err; @@ -1359,11 +1354,6 @@ static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event, if (!net_dev) return ERR_PTR(-ENODEV); - if (!validate_net_dev(net_dev, listen_addr, src_addr)) { - dev_put(net_dev); - return ERR_PTR(-EHOSTUNREACH); - } - return net_dev; } @@ -1490,15 +1480,51 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id, } } + /* + * Net namespace might be getting deleted while route lookup, + * cm_id lookup is in progress. Therefore, perform netdevice + * validation, cm_id lookup under rcu lock. + * RCU lock along with netdevice state check, synchronizes with + * netdevice migrating to different net namespace and also avoids + * case where net namespace doesn't get deleted while lookup is in + * progress. + * If the device state is not IFF_UP, its properties such as ifindex + * and nd_net cannot be trusted to remain valid without rcu lock. + * net/core/dev.c change_net_namespace() ensures to synchronize with + * ongoing operations on net device after device is closed using + * synchronize_net(). + */ + rcu_read_lock(); + if (*net_dev) { + /* + * If netdevice is down, it is likely that it is administratively + * down or it might be migrating to different namespace. + * In that case avoid further processing, as the net namespace + * or ifindex may change. + */ + if (((*net_dev)->flags & IFF_UP) == 0) { + id_priv = ERR_PTR(-EHOSTUNREACH); + goto err; + } + + if (!validate_net_dev(*net_dev, + (struct sockaddr *)&req.listen_addr_storage, + (struct sockaddr *)&req.src_addr_storage)) { + id_priv = ERR_PTR(-EHOSTUNREACH); + goto err; + } + } + bind_list = cma_ps_find(*net_dev ? 
dev_net(*net_dev) : &init_net, rdma_ps_from_service_id(req.service_id), cma_port_from_service_id(req.service_id)); id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev); +err: + rcu_read_unlock(); if (IS_ERR(id_priv) && *net_dev) { dev_put(*net_dev); *net_dev = NULL; } - return id_priv; } diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c index 9821ae900f6d..da12da1c36f6 100644 --- a/drivers/infiniband/core/iwpm_util.c +++ b/drivers/infiniband/core/iwpm_util.c @@ -114,7 +114,7 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr, struct sockaddr_storage *mapped_sockaddr, u8 nl_client) { - struct hlist_head *hash_bucket_head; + struct hlist_head *hash_bucket_head = NULL; struct iwpm_mapping_info *map_info; unsigned long flags; int ret = -EINVAL; @@ -142,6 +142,9 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr, } } spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags); + + if (!hash_bucket_head) + kfree(map_info); return ret; } diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index c50596f7f98a..b28452a55a08 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -59,7 +59,7 @@ module_param_named(recv_queue_size, mad_recvq_size, int, 0444); MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests"); static struct list_head ib_mad_port_list; -static u32 ib_mad_client_id = 0; +static atomic_t ib_mad_client_id = ATOMIC_INIT(0); /* Port list lock */ static DEFINE_SPINLOCK(ib_mad_port_list_lock); @@ -377,7 +377,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, } spin_lock_irqsave(&port_priv->reg_lock, flags); - mad_agent_priv->agent.hi_tid = ++ib_mad_client_id; + mad_agent_priv->agent.hi_tid = atomic_inc_return(&ib_mad_client_id); /* * Make sure MAD registration (if supplied) diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c index cc2966380c0c..c0e4fd55e2cc 100644 --- a/drivers/infiniband/core/roce_gid_mgmt.c +++ b/drivers/infiniband/core/roce_gid_mgmt.c @@ -255,6 +255,7 @@ static void bond_delete_netdev_default_gids(struct ib_device *ib_dev, struct net_device *rdma_ndev) { struct net_device *real_dev = rdma_vlan_dev_real_dev(event_ndev); + unsigned long gid_type_mask; if (!rdma_ndev) return; @@ -264,21 +265,22 @@ static void bond_delete_netdev_default_gids(struct ib_device *ib_dev, rcu_read_lock(); - if (rdma_is_upper_dev_rcu(rdma_ndev, event_ndev) && - is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) == - BONDING_SLAVE_STATE_INACTIVE) { - unsigned long gid_type_mask; - + if (((rdma_ndev != event_ndev && + !rdma_is_upper_dev_rcu(rdma_ndev, event_ndev)) || + is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) + == + BONDING_SLAVE_STATE_INACTIVE)) { rcu_read_unlock(); + return; + } - gid_type_mask = roce_gid_type_mask_support(ib_dev, port); + rcu_read_unlock(); - ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev, - gid_type_mask, - IB_CACHE_GID_DEFAULT_MODE_DELETE); - } else { - rcu_read_unlock(); - } + gid_type_mask = roce_gid_type_mask_support(ib_dev, port); + + ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev, + gid_type_mask, + IB_CACHE_GID_DEFAULT_MODE_DELETE); } static void enum_netdev_ipv4_ips(struct ib_device *ib_dev, diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 74329483af6d..eab43b17e9cf 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -159,6 +159,23 @@ static void 
ucma_put_ctx(struct ucma_context *ctx) complete(&ctx->comp); } +/* + * Same as ucm_get_ctx but requires that ->cm_id->device is valid, eg that the + * CM_ID is bound. + */ +static struct ucma_context *ucma_get_ctx_dev(struct ucma_file *file, int id) +{ + struct ucma_context *ctx = ucma_get_ctx(file, id); + + if (IS_ERR(ctx)) + return ctx; + if (!ctx->cm_id->device) { + ucma_put_ctx(ctx); + return ERR_PTR(-EINVAL); + } + return ctx; +} + static void ucma_close_event_id(struct work_struct *work) { struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work); @@ -683,7 +700,7 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; - if (!rdma_addr_size_in6(&cmd.src_addr) || + if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) || !rdma_addr_size_in6(&cmd.dst_addr)) return -EINVAL; @@ -734,7 +751,7 @@ static ssize_t ucma_resolve_route(struct ucma_file *file, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; - ctx = ucma_get_ctx(file, cmd.id); + ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); @@ -1050,7 +1067,7 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf, if (!cmd.conn_param.valid) return -EINVAL; - ctx = ucma_get_ctx(file, cmd.id); + ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); @@ -1092,7 +1109,7 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; - ctx = ucma_get_ctx(file, cmd.id); + ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); @@ -1120,7 +1137,7 @@ static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; - ctx = ucma_get_ctx(file, cmd.id); + ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); @@ -1139,7 +1156,7 @@ static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; - ctx = ucma_get_ctx(file, cmd.id); + ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); @@ -1167,15 +1184,10 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file, if (cmd.qp_state > IB_QPS_ERR) return -EINVAL; - ctx = ucma_get_ctx(file, cmd.id); + ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); - if (!ctx->cm_id->device) { - ret = -EINVAL; - goto out; - } - resp.qp_attr_mask = 0; memset(&qp_attr, 0, sizeof qp_attr); qp_attr.qp_state = cmd.qp_state; @@ -1316,13 +1328,13 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; + if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE)) + return -EINVAL; + ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); - if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE)) - return -EINVAL; - optval = memdup_user(u64_to_user_ptr(cmd.optval), cmd.optlen); if (IS_ERR(optval)) { @@ -1384,7 +1396,7 @@ static ssize_t ucma_process_join(struct ucma_file *file, else return -EINVAL; - ctx = ucma_get_ctx(file, cmd->id); + ctx = ucma_get_ctx_dev(file, cmd->id); if (IS_ERR(ctx)) return PTR_ERR(ctx); diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 13cb5e4deb86..21a887c9523b 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -691,6 +691,7 @@ ssize_t 
ib_uverbs_reg_mr(struct ib_uverbs_file *file, mr->device = pd->device; mr->pd = pd; + mr->dm = NULL; mr->uobject = uobj; atomic_inc(&pd->usecnt); mr->res.type = RDMA_RESTRACK_MR; @@ -765,6 +766,11 @@ ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file, mr = uobj->object; + if (mr->dm) { + ret = -EINVAL; + goto put_uobjs; + } + if (cmd.flags & IB_MR_REREG_ACCESS) { ret = ib_check_mr_access(cmd.access_flags); if (ret) diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c index 8c93970dc8f1..8d32c4ae368c 100644 --- a/drivers/infiniband/core/uverbs_ioctl.c +++ b/drivers/infiniband/core/uverbs_ioctl.c @@ -234,6 +234,15 @@ static int uverbs_validate_kernel_mandatory(const struct uverbs_method_spec *met return -EINVAL; } + for (; i < method_spec->num_buckets; i++) { + struct uverbs_attr_spec_hash *attr_spec_bucket = + method_spec->attr_buckets[i]; + + if (!bitmap_empty(attr_spec_bucket->mandatory_attrs_bitmask, + attr_spec_bucket->num_attrs)) + return -EINVAL; + } + return 0; } diff --git a/drivers/infiniband/core/uverbs_std_types_flow_action.c b/drivers/infiniband/core/uverbs_std_types_flow_action.c index cbcec3da12f6..b4f016dfa23d 100644 --- a/drivers/infiniband/core/uverbs_std_types_flow_action.c +++ b/drivers/infiniband/core/uverbs_std_types_flow_action.c @@ -363,28 +363,28 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)(struct ib_device static const struct uverbs_attr_spec uverbs_flow_action_esp_keymat[] = { [IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM] = { - .ptr = { + { .ptr = { .type = UVERBS_ATTR_TYPE_PTR_IN, UVERBS_ATTR_TYPE(struct ib_uverbs_flow_action_esp_keymat_aes_gcm), .flags = UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO, - }, + } }, }, }; static const struct uverbs_attr_spec uverbs_flow_action_esp_replay[] = { [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE] = { - .ptr = { + { .ptr = { .type = UVERBS_ATTR_TYPE_PTR_IN, /* No need to specify any data */ .len = 0, - } + } } }, [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP] = { - .ptr = { + { .ptr = { .type = UVERBS_ATTR_TYPE_PTR_IN, UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp_replay_bmp, size), .flags = UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO, - } + } } }, }; diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 7eff3aeffe01..6ddfb1fade79 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1656,6 +1656,7 @@ struct ib_mr *ib_alloc_mr(struct ib_pd *pd, if (!IS_ERR(mr)) { mr->device = pd->device; mr->pd = pd; + mr->dm = NULL; mr->uobject = NULL; atomic_inc(&pd->usecnt); mr->need_inval = false; diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index 6f2b26126c64..2be2e1ac1b5f 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -315,7 +315,7 @@ static void advance_oldest_read(struct t4_wq *wq) * Deal with out-of-order and/or completions that complete * prior unsignalled WRs. 
*/ -void c4iw_flush_hw_cq(struct c4iw_cq *chp) +void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp) { struct t4_cqe *hw_cqe, *swcqe, read_cqe; struct c4iw_qp *qhp; @@ -339,6 +339,13 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp) if (qhp == NULL) goto next_cqe; + if (flush_qhp != qhp) { + spin_lock(&qhp->lock); + + if (qhp->wq.flushed == 1) + goto next_cqe; + } + if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) goto next_cqe; @@ -390,6 +397,8 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp) next_cqe: t4_hwcq_consume(&chp->cq); ret = t4_next_hw_cqe(&chp->cq, &hw_cqe); + if (qhp && flush_qhp != qhp) + spin_unlock(&qhp->lock); } } diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index feeb8ee6f4a2..44161ca4d2a8 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -875,6 +875,11 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) rdev->status_page->db_off = 0; + init_completion(&rdev->rqt_compl); + init_completion(&rdev->pbl_compl); + kref_init(&rdev->rqt_kref); + kref_init(&rdev->pbl_kref); + return 0; err_free_status_page_and_wr_log: if (c4iw_wr_log && rdev->wr_log) @@ -893,13 +898,15 @@ destroy_resource: static void c4iw_rdev_close(struct c4iw_rdev *rdev) { - destroy_workqueue(rdev->free_workq); kfree(rdev->wr_log); c4iw_release_dev_ucontext(rdev, &rdev->uctx); free_page((unsigned long)rdev->status_page); c4iw_pblpool_destroy(rdev); c4iw_rqtpool_destroy(rdev); + wait_for_completion(&rdev->pbl_compl); + wait_for_completion(&rdev->rqt_compl); c4iw_ocqp_pool_destroy(rdev); + destroy_workqueue(rdev->free_workq); c4iw_destroy_resource(&rdev->resource); } diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index cc929002c05e..831027717121 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -185,6 +185,10 @@ struct c4iw_rdev { struct wr_log_entry *wr_log; int wr_log_size; struct workqueue_struct *free_workq; + struct completion rqt_compl; + struct completion pbl_compl; + struct kref rqt_kref; + struct kref pbl_kref; }; static inline int c4iw_fatal_error(struct c4iw_rdev *rdev) @@ -1049,7 +1053,7 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size); void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size); u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size); void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size); -void c4iw_flush_hw_cq(struct c4iw_cq *chp); +void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp); void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count); int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp); int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count); diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index de77b6027d69..ae167b686608 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -1343,12 +1343,12 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, qhp->wq.flushed = 1; t4_set_wq_in_error(&qhp->wq); - c4iw_flush_hw_cq(rchp); + c4iw_flush_hw_cq(rchp, qhp); c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count); rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); if (schp != rchp) - c4iw_flush_hw_cq(schp); + c4iw_flush_hw_cq(schp, qhp); sq_flushed = c4iw_flush_sq(qhp); spin_unlock(&qhp->lock); diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c index 3cf25997ed2b..0ef25ae05e6f 100644 --- 
a/drivers/infiniband/hw/cxgb4/resource.c +++ b/drivers/infiniband/hw/cxgb4/resource.c @@ -260,12 +260,22 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size) rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT); if (rdev->stats.pbl.cur > rdev->stats.pbl.max) rdev->stats.pbl.max = rdev->stats.pbl.cur; + kref_get(&rdev->pbl_kref); } else rdev->stats.pbl.fail++; mutex_unlock(&rdev->stats.lock); return (u32)addr; } +static void destroy_pblpool(struct kref *kref) +{ + struct c4iw_rdev *rdev; + + rdev = container_of(kref, struct c4iw_rdev, pbl_kref); + gen_pool_destroy(rdev->pbl_pool); + complete(&rdev->pbl_compl); +} + void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size) { pr_debug("addr 0x%x size %d\n", addr, size); @@ -273,6 +283,7 @@ void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size) rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT); mutex_unlock(&rdev->stats.lock); gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size); + kref_put(&rdev->pbl_kref, destroy_pblpool); } int c4iw_pblpool_create(struct c4iw_rdev *rdev) @@ -310,7 +321,7 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev) void c4iw_pblpool_destroy(struct c4iw_rdev *rdev) { - gen_pool_destroy(rdev->pbl_pool); + kref_put(&rdev->pbl_kref, destroy_pblpool); } /* @@ -331,12 +342,22 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size) rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT); if (rdev->stats.rqt.cur > rdev->stats.rqt.max) rdev->stats.rqt.max = rdev->stats.rqt.cur; + kref_get(&rdev->rqt_kref); } else rdev->stats.rqt.fail++; mutex_unlock(&rdev->stats.lock); return (u32)addr; } +static void destroy_rqtpool(struct kref *kref) +{ + struct c4iw_rdev *rdev; + + rdev = container_of(kref, struct c4iw_rdev, rqt_kref); + gen_pool_destroy(rdev->rqt_pool); + complete(&rdev->rqt_compl); +} + void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size) { pr_debug("addr 0x%x size %d\n", addr, size << 6); @@ -344,6 +365,7 @@ void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size) rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT); mutex_unlock(&rdev->stats.lock); gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6); + kref_put(&rdev->rqt_kref, destroy_rqtpool); } int c4iw_rqtpool_create(struct c4iw_rdev *rdev) @@ -380,7 +402,7 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev) void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev) { - gen_pool_destroy(rdev->rqt_pool); + kref_put(&rdev->rqt_kref, destroy_rqtpool); } /* diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c index a97055dd4fbd..b5fab55cc275 100644 --- a/drivers/infiniband/hw/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c @@ -412,7 +412,6 @@ static void hfi1_cleanup_sdma_notifier(struct hfi1_msix_entry *msix) static int get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix) { - int ret; cpumask_var_t diff; struct hfi1_affinity_node *entry; struct cpu_mask_set *set = NULL; @@ -424,10 +423,6 @@ static int get_irq_affinity(struct hfi1_devdata *dd, extra[0] = '\0'; cpumask_clear(&msix->mask); - ret = zalloc_cpumask_var(&diff, GFP_KERNEL); - if (!ret) - return -ENOMEM; - entry = node_affinity_lookup(dd->node); switch (msix->type) { @@ -458,6 +453,9 @@ static int get_irq_affinity(struct hfi1_devdata *dd, * finds its CPU here. 
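The cxgb4 resource.c hunks above implement a common teardown-ordering pattern; the following self-contained sketch (generic demo_* names, not the cxgb4 symbols) shows its shape: the pool owner holds one kref, every live allocation takes another, and the release callback triggered by the final kref_put signals a completion that the close path waits on, so the underlying pool is destroyed only after the last allocation has been freed.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/completion.h>

struct demo_pool {
        struct kref kref;
        struct completion released;
};

static void demo_pool_release(struct kref *kref)
{
        struct demo_pool *p = container_of(kref, struct demo_pool, kref);

        /* free the backing resource (e.g. a gen_pool) here ... */
        complete(&p->released);             /* ... then wake any waiter */
}

static void demo_pool_init(struct demo_pool *p)
{
        kref_init(&p->kref);                /* the owner's reference */
        init_completion(&p->released);
}

static void demo_pool_alloc_obj(struct demo_pool *p)
{
        kref_get(&p->kref);                 /* one reference per allocation */
}

static void demo_pool_free_obj(struct demo_pool *p)
{
        kref_put(&p->kref, demo_pool_release);
}

static void demo_pool_close(struct demo_pool *p)
{
        kref_put(&p->kref, demo_pool_release);  /* drop the owner's reference */
        wait_for_completion(&p->released);      /* block until the last user is gone */
}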
*/ if (cpu == -1 && set) { + if (!zalloc_cpumask_var(&diff, GFP_KERNEL)) + return -ENOMEM; + if (cpumask_equal(&set->mask, &set->used)) { /* * We've used up all the CPUs, bump up the generation @@ -469,6 +467,8 @@ static int get_irq_affinity(struct hfi1_devdata *dd, cpumask_andnot(diff, &set->mask, &set->used); cpu = cpumask_first(diff); cpumask_set_cpu(cpu, &set->used); + + free_cpumask_var(diff); } cpumask_set_cpu(cpu, &msix->mask); @@ -482,7 +482,6 @@ static int get_irq_affinity(struct hfi1_devdata *dd, hfi1_setup_sdma_notifier(msix); } - free_cpumask_var(diff); return 0; } diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c index 46d1475b2154..bd837a048bf4 100644 --- a/drivers/infiniband/hw/hfi1/driver.c +++ b/drivers/infiniband/hw/hfi1/driver.c @@ -433,31 +433,43 @@ void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt, bool do_cnp) { struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); + struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); struct ib_other_headers *ohdr = pkt->ohdr; struct ib_grh *grh = pkt->grh; u32 rqpn = 0, bth1; - u16 pkey, rlid, dlid = ib_get_dlid(pkt->hdr); + u16 pkey; + u32 rlid, slid, dlid = 0; u8 hdr_type, sc, svc_type; bool is_mcast = false; + /* can be called from prescan */ if (pkt->etype == RHF_RCV_TYPE_BYPASS) { is_mcast = hfi1_is_16B_mcast(dlid); pkey = hfi1_16B_get_pkey(pkt->hdr); sc = hfi1_16B_get_sc(pkt->hdr); + dlid = hfi1_16B_get_dlid(pkt->hdr); + slid = hfi1_16B_get_slid(pkt->hdr); hdr_type = HFI1_PKT_TYPE_16B; } else { is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) && (dlid != be16_to_cpu(IB_LID_PERMISSIVE)); pkey = ib_bth_get_pkey(ohdr); sc = hfi1_9B_get_sc5(pkt->hdr, pkt->rhf); + dlid = ib_get_dlid(pkt->hdr); + slid = ib_get_slid(pkt->hdr); hdr_type = HFI1_PKT_TYPE_9B; } switch (qp->ibqp.qp_type) { + case IB_QPT_UD: + dlid = ppd->lid; + rlid = slid; + rqpn = ib_get_sqpn(pkt->ohdr); + svc_type = IB_CC_SVCTYPE_UD; + break; case IB_QPT_SMI: case IB_QPT_GSI: - case IB_QPT_UD: - rlid = ib_get_slid(pkt->hdr); + rlid = slid; rqpn = ib_get_sqpn(pkt->ohdr); svc_type = IB_CC_SVCTYPE_UD; break; @@ -482,7 +494,6 @@ void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt, dlid, rlid, sc, grh); if (!is_mcast && (bth1 & IB_BECN_SMASK)) { - struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); u32 lqpn = bth1 & RVT_QPN_MASK; u8 sl = ibp->sc_to_sl[sc]; diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index 32c48265405e..cac2c62bc42d 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h @@ -1537,13 +1537,13 @@ void set_link_ipg(struct hfi1_pportdata *ppd); void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn, u32 rqpn, u8 svc_type); void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn, - u32 pkey, u32 slid, u32 dlid, u8 sc5, + u16 pkey, u32 slid, u32 dlid, u8 sc5, const struct ib_grh *old_grh); void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp, - u32 remote_qpn, u32 pkey, u32 slid, u32 dlid, + u32 remote_qpn, u16 pkey, u32 slid, u32 dlid, u8 sc5, const struct ib_grh *old_grh); typedef void (*hfi1_handle_cnp)(struct hfi1_ibport *ibp, struct rvt_qp *qp, - u32 remote_qpn, u32 pkey, u32 slid, u32 dlid, + u32 remote_qpn, u16 pkey, u32 slid, u32 dlid, u8 sc5, const struct ib_grh *old_grh); #define PKEY_CHECK_INVALID -1 @@ -2437,7 +2437,7 @@ static inline void hfi1_make_16b_hdr(struct hfi1_16b_header *hdr, ((slid >> OPA_16B_SLID_SHIFT) << OPA_16B_SLID_HIGH_SHIFT); lrh2 = 
(lrh2 & ~OPA_16B_DLID_MASK) | ((dlid >> OPA_16B_DLID_SHIFT) << OPA_16B_DLID_HIGH_SHIFT); - lrh2 = (lrh2 & ~OPA_16B_PKEY_MASK) | (pkey << OPA_16B_PKEY_SHIFT); + lrh2 = (lrh2 & ~OPA_16B_PKEY_MASK) | ((u32)pkey << OPA_16B_PKEY_SHIFT); lrh2 = (lrh2 & ~OPA_16B_L4_MASK) | l4; hdr->lrh[0] = lrh0; diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index 33eba2356742..6309edf811df 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c @@ -88,9 +88,9 @@ * pio buffers per ctxt, etc.) Zero means use one user context per CPU. */ int num_user_contexts = -1; -module_param_named(num_user_contexts, num_user_contexts, uint, S_IRUGO); +module_param_named(num_user_contexts, num_user_contexts, int, 0444); MODULE_PARM_DESC( - num_user_contexts, "Set max number of user contexts to use"); + num_user_contexts, "Set max number of user contexts to use (default: -1 will use the real (non-HT) CPU count)"); uint krcvqs[RXE_NUM_DATA_VL]; int krcvqsset; @@ -1209,30 +1209,49 @@ static void finalize_asic_data(struct hfi1_devdata *dd, kfree(ad); } -static void __hfi1_free_devdata(struct kobject *kobj) +/** + * hfi1_clean_devdata - cleans up per-unit data structure + * @dd: pointer to a valid devdata structure + * + * It cleans up all data structures set up by + * by hfi1_alloc_devdata(). + */ +static void hfi1_clean_devdata(struct hfi1_devdata *dd) { - struct hfi1_devdata *dd = - container_of(kobj, struct hfi1_devdata, kobj); struct hfi1_asic_data *ad; unsigned long flags; spin_lock_irqsave(&hfi1_devs_lock, flags); - idr_remove(&hfi1_unit_table, dd->unit); - list_del(&dd->list); + if (!list_empty(&dd->list)) { + idr_remove(&hfi1_unit_table, dd->unit); + list_del_init(&dd->list); + } ad = release_asic_data(dd); spin_unlock_irqrestore(&hfi1_devs_lock, flags); - if (ad) - finalize_asic_data(dd, ad); + + finalize_asic_data(dd, ad); free_platform_config(dd); rcu_barrier(); /* wait for rcu callbacks to complete */ free_percpu(dd->int_counter); free_percpu(dd->rcv_limit); free_percpu(dd->send_schedule); free_percpu(dd->tx_opstats); + dd->int_counter = NULL; + dd->rcv_limit = NULL; + dd->send_schedule = NULL; + dd->tx_opstats = NULL; sdma_clean(dd, dd->num_sdma); rvt_dealloc_device(&dd->verbs_dev.rdi); } +static void __hfi1_free_devdata(struct kobject *kobj) +{ + struct hfi1_devdata *dd = + container_of(kobj, struct hfi1_devdata, kobj); + + hfi1_clean_devdata(dd); +} + static struct kobj_type hfi1_devdata_type = { .release = __hfi1_free_devdata, }; @@ -1265,6 +1284,8 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra) return ERR_PTR(-ENOMEM); dd->num_pports = nports; dd->pport = (struct hfi1_pportdata *)(dd + 1); + dd->pcidev = pdev; + pci_set_drvdata(pdev, dd); INIT_LIST_HEAD(&dd->list); idr_preload(GFP_KERNEL); @@ -1331,9 +1352,7 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra) return dd; bail: - if (!list_empty(&dd->list)) - list_del_init(&dd->list); - rvt_dealloc_device(&dd->verbs_dev.rdi); + hfi1_clean_devdata(dd); return ERR_PTR(ret); } diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index 83d66e862207..c1c982908b4b 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c @@ -163,9 +163,6 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev) resource_size_t addr; int ret = 0; - dd->pcidev = pdev; - pci_set_drvdata(pdev, dd); - addr = pci_resource_start(pdev, 0); len = pci_resource_len(pdev, 0); diff --git 
a/drivers/infiniband/hw/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c index d486355880cb..cbf7faa5038c 100644 --- a/drivers/infiniband/hw/hfi1/platform.c +++ b/drivers/infiniband/hw/hfi1/platform.c @@ -199,6 +199,7 @@ void free_platform_config(struct hfi1_devdata *dd) { /* Release memory allocated for eprom or fallback file read. */ kfree(dd->platform_config.data); + dd->platform_config.data = NULL; } void get_port_type(struct hfi1_pportdata *ppd) diff --git a/drivers/infiniband/hw/hfi1/qsfp.c b/drivers/infiniband/hw/hfi1/qsfp.c index 1869f639c3ae..b5966991d647 100644 --- a/drivers/infiniband/hw/hfi1/qsfp.c +++ b/drivers/infiniband/hw/hfi1/qsfp.c @@ -204,6 +204,8 @@ static void clean_i2c_bus(struct hfi1_i2c_bus *bus) void clean_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad) { + if (!ad) + return; clean_i2c_bus(ad->i2c_bus0); ad->i2c_bus0 = NULL; clean_i2c_bus(ad->i2c_bus1); diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c index 3daa94bdae3a..c0071ca4147a 100644 --- a/drivers/infiniband/hw/hfi1/ruc.c +++ b/drivers/infiniband/hw/hfi1/ruc.c @@ -733,6 +733,20 @@ static inline void hfi1_make_ruc_bth(struct rvt_qp *qp, ohdr->bth[2] = cpu_to_be32(bth2); } +/** + * hfi1_make_ruc_header_16B - build a 16B header + * @qp: the queue pair + * @ohdr: a pointer to the destination header memory + * @bth0: bth0 passed in from the RC/UC builder + * @bth2: bth2 passed in from the RC/UC builder + * @middle: non zero implies indicates ahg "could" be used + * @ps: the current packet state + * + * This routine may disarm ahg under these situations: + * - packet needs a GRH + * - BECN needed + * - migration state not IB_MIG_MIGRATED + */ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp, struct ib_other_headers *ohdr, u32 bth0, u32 bth2, int middle, @@ -777,6 +791,12 @@ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp, else middle = 0; + if (qp->s_flags & RVT_S_ECN) { + qp->s_flags &= ~RVT_S_ECN; + /* we recently received a FECN, so return a BECN */ + becn = true; + middle = 0; + } if (middle) build_ahg(qp, bth2); else @@ -784,11 +804,6 @@ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp, bth0 |= pkey; bth0 |= extra_bytes << 20; - if (qp->s_flags & RVT_S_ECN) { - qp->s_flags &= ~RVT_S_ECN; - /* we recently received a FECN, so return a BECN */ - becn = true; - } hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2); if (!ppd->lid) @@ -806,6 +821,20 @@ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp, pkey, becn, 0, l4, priv->s_sc); } +/** + * hfi1_make_ruc_header_9B - build a 9B header + * @qp: the queue pair + * @ohdr: a pointer to the destination header memory + * @bth0: bth0 passed in from the RC/UC builder + * @bth2: bth2 passed in from the RC/UC builder + * @middle: non zero implies indicates ahg "could" be used + * @ps: the current packet state + * + * This routine may disarm ahg under these situations: + * - packet needs a GRH + * - BECN needed + * - migration state not IB_MIG_MIGRATED + */ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp, struct ib_other_headers *ohdr, u32 bth0, u32 bth2, int middle, @@ -839,6 +868,12 @@ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp, else middle = 0; + if (qp->s_flags & RVT_S_ECN) { + qp->s_flags &= ~RVT_S_ECN; + /* we recently received a FECN, so return a BECN */ + bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT); + middle = 0; + } if (middle) build_ahg(qp, bth2); else @@ -846,11 +881,6 @@ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp, bth0 |= 
pkey; bth0 |= extra_bytes << 20; - if (qp->s_flags & RVT_S_ECN) { - qp->s_flags &= ~RVT_S_ECN; - /* we recently received a FECN, so return a BECN */ - bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT); - } hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2); hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh, lrh0, diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c index bcf3b0bebac8..69c17a5ef038 100644 --- a/drivers/infiniband/hw/hfi1/ud.c +++ b/drivers/infiniband/hw/hfi1/ud.c @@ -628,7 +628,7 @@ int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey) } void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp, - u32 remote_qpn, u32 pkey, u32 slid, u32 dlid, + u32 remote_qpn, u16 pkey, u32 slid, u32 dlid, u8 sc5, const struct ib_grh *old_grh) { u64 pbc, pbc_flags = 0; @@ -687,7 +687,7 @@ void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp, } void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn, - u32 pkey, u32 slid, u32 dlid, u8 sc5, + u16 pkey, u32 slid, u32 dlid, u8 sc5, const struct ib_grh *old_grh) { u64 pbc, pbc_flags = 0; diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index 0eeabfbee192..63b5b3edabcb 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -912,7 +912,7 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev, obj_per_chunk = buf_chunk_size / obj_size; num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk; bt_chunk_num = bt_chunk_size / 8; - if (table->type >= HEM_TYPE_MTT) + if (type >= HEM_TYPE_MTT) num_bt_l0 = bt_chunk_num; table->hem = kcalloc(num_hem, sizeof(*table->hem), @@ -920,7 +920,7 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev, if (!table->hem) goto err_kcalloc_hem_buf; - if (check_whether_bt_num_3(table->type, hop_num)) { + if (check_whether_bt_num_3(type, hop_num)) { unsigned long num_bt_l1; num_bt_l1 = (num_hem + bt_chunk_num - 1) / @@ -939,8 +939,8 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev, goto err_kcalloc_l1_dma; } - if (check_whether_bt_num_2(table->type, hop_num) || - check_whether_bt_num_3(table->type, hop_num)) { + if (check_whether_bt_num_2(type, hop_num) || + check_whether_bt_num_3(type, hop_num)) { table->bt_l0 = kcalloc(num_bt_l0, sizeof(*table->bt_l0), GFP_KERNEL); if (!table->bt_l0) @@ -1039,14 +1039,14 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev, void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev) { hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table); - hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table); if (hr_dev->caps.trrl_entry_sz) hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.trrl_table); + hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table); hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table); hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table); - hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table); if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_cqe_table); + hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table); } diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 8b84ab7800d8..25916e8522ed 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -71,6 +71,11 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr, return -EINVAL; } + if (wr->opcode == 
IB_WR_RDMA_READ) { + dev_err(hr_dev->dev, "Not support inline data!\n"); + return -EINVAL; + } + for (i = 0; i < wr->num_sge; i++) { memcpy(wqe, ((void *)wr->sg_list[i].addr), wr->sg_list[i].length); @@ -148,7 +153,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ibqp->qp_type != IB_QPT_GSI && ibqp->qp_type != IB_QPT_UD)) { dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type); - *bad_wr = NULL; + *bad_wr = wr; return -EOPNOTSUPP; } @@ -182,7 +187,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id; - owner_bit = ~(qp->sq.head >> ilog2(qp->sq.wqe_cnt)) & 0x1; + owner_bit = + ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1); /* Corresponding to the QP type, wqe process separately */ if (ibqp->qp_type == IB_QPT_GSI) { @@ -456,6 +462,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, } else { dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type); spin_unlock_irqrestore(&qp->sq.lock, flags); + *bad_wr = wr; return -EOPNOTSUPP; } } @@ -2592,10 +2599,12 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp, roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M, V2_QPC_BYTE_4_SQPN_S, 0); - roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M, - V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn); - roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M, - V2_QPC_BYTE_56_DQPN_S, 0); + if (attr_mask & IB_QP_DEST_QPN) { + roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M, + V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn); + roce_set_field(qpc_mask->byte_56_dqpn_err, + V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0); + } roce_set_field(context->byte_168_irrl_idx, V2_QPC_BYTE_168_SQ_SHIFT_BAK_M, V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, @@ -2650,8 +2659,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, return -EINVAL; } - if ((attr_mask & IB_QP_ALT_PATH) || (attr_mask & IB_QP_ACCESS_FLAGS) || - (attr_mask & IB_QP_PKEY_INDEX) || (attr_mask & IB_QP_QKEY)) { + if (attr_mask & IB_QP_ALT_PATH) { dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask); return -EINVAL; } @@ -2800,10 +2808,12 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, V2_QPC_BYTE_140_RR_MAX_S, 0); } - roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M, - V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num); - roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M, - V2_QPC_BYTE_56_DQPN_S, 0); + if (attr_mask & IB_QP_DEST_QPN) { + roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M, + V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num); + roce_set_field(qpc_mask->byte_56_dqpn_err, + V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0); + } /* Configure GID index */ port_num = rdma_ah_get_port_num(&attr->ah_attr); @@ -2845,7 +2855,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD) roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M, V2_QPC_BYTE_24_MTU_S, IB_MTU_4096); - else + else if (attr_mask & IB_QP_PATH_MTU) roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M, V2_QPC_BYTE_24_MTU_S, attr->path_mtu); @@ -2922,11 +2932,9 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, return -EINVAL; } - /* If exist optional param, return error */ - if ((attr_mask & IB_QP_ALT_PATH) || (attr_mask & IB_QP_ACCESS_FLAGS) || - (attr_mask & IB_QP_QKEY) || (attr_mask & IB_QP_PATH_MIG_STATE) || - (attr_mask & IB_QP_CUR_STATE) || - (attr_mask & 
IB_QP_MIN_RNR_TIMER)) { + /* Not support alternate path and path migration */ + if ((attr_mask & IB_QP_ALT_PATH) || + (attr_mask & IB_QP_PATH_MIG_STATE)) { dev_err(dev, "RTR2RTS attr_mask (0x%x)error\n", attr_mask); return -EINVAL; } @@ -3161,7 +3169,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) || (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) || (cur_state == IB_QPS_SQD && new_state == IB_QPS_ERR) || - (cur_state == IB_QPS_SQE && new_state == IB_QPS_ERR)) { + (cur_state == IB_QPS_SQE && new_state == IB_QPS_ERR) || + (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR)) { /* Nothing */ ; } else { @@ -4478,7 +4487,7 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev, ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0, eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS); if (ret) { - dev_err(dev, "[mailbox cmd] creat eqc failed.\n"); + dev_err(dev, "[mailbox cmd] create eqc failed.\n"); goto err_cmd_mbox; } diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index e289a924e789..d4aad34c21e2 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -620,7 +620,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, to_hr_ucontext(ib_pd->uobject->context), ucmd.db_addr, &hr_qp->rdb); if (ret) { - dev_err(dev, "rp record doorbell map failed!\n"); + dev_err(dev, "rq record doorbell map failed!\n"); goto err_mtt; } } diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index 17f4f151a97f..61d8b06375bb 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c @@ -346,7 +346,7 @@ int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va, /* Add to the first block the misalignment that it suffers from. */ total_len += (first_block_start & ((1ULL << block_shift) - 1ULL)); last_block_end = current_block_start + current_block_len; - last_block_aligned_end = round_up(last_block_end, 1 << block_shift); + last_block_aligned_end = round_up(last_block_end, 1ULL << block_shift); total_len += (last_block_aligned_end - last_block_end); if (total_len & ((1ULL << block_shift) - 1ULL)) diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 50af8915e7ec..199648adac74 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -673,7 +673,8 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx, MLX4_IB_RX_HASH_SRC_PORT_TCP | MLX4_IB_RX_HASH_DST_PORT_TCP | MLX4_IB_RX_HASH_SRC_PORT_UDP | - MLX4_IB_RX_HASH_DST_PORT_UDP)) { + MLX4_IB_RX_HASH_DST_PORT_UDP | + MLX4_IB_RX_HASH_INNER)) { pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n", ucmd->rx_hash_fields_mask); return (-EOPNOTSUPP); diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig index bce263b92821..fb4d77be019b 100644 --- a/drivers/infiniband/hw/mlx5/Kconfig +++ b/drivers/infiniband/hw/mlx5/Kconfig @@ -1,6 +1,7 @@ config MLX5_INFINIBAND tristate "Mellanox Connect-IB HCA support" depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE + depends on INFINIBAND_USER_ACCESS || INFINIBAND_USER_ACCESS=n ---help--- This driver provides low-level InfiniBand support for Mellanox Connect-IB PCI Express host channel adapters (HCAs). 
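The mlx4 mr.c hunk above ("1 << block_shift" becoming "1ULL << block_shift") is a classic integer-promotion fix; a stand-alone, kernel-style sketch of the issue (not the driver code itself):

#include <linux/kernel.h>       /* round_up() */
#include <linux/types.h>

static u64 demo_align_block_end(u64 block_end, unsigned int block_shift)
{
        /*
         * With a plain "1 << block_shift" the shift is evaluated as a
         * 32-bit int, which overflows once block_shift reaches 31, so
         * round_up() would be handed a bogus (or undefined) alignment.
         * Promoting the constant to 1ULL keeps the whole expression in
         * 64 bits.
         */
        return round_up(block_end, 1ULL << block_shift);
}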
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index daa919e5a442..b4d8ff8ab807 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -52,7 +52,6 @@ #include <linux/mlx5/port.h> #include <linux/mlx5/vport.h> #include <linux/mlx5/fs.h> -#include <linux/mlx5/fs_helpers.h> #include <linux/list.h> #include <rdma/ib_smi.h> #include <rdma/ib_umem.h> @@ -180,7 +179,7 @@ static int mlx5_netdev_event(struct notifier_block *this, if (rep_ndev == ndev) roce->netdev = (event == NETDEV_UNREGISTER) ? NULL : ndev; - } else if (ndev->dev.parent == &ibdev->mdev->pdev->dev) { + } else if (ndev->dev.parent == &mdev->pdev->dev) { roce->netdev = (event == NETDEV_UNREGISTER) ? NULL : ndev; } @@ -4757,7 +4756,7 @@ mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector) { struct mlx5_ib_dev *dev = to_mdev(ibdev); - return mlx5_get_vector_affinity(dev->mdev, comp_vector); + return mlx5_get_vector_affinity_hint(dev->mdev, comp_vector); } /* The mlx5_ib_multiport_mutex should be held when calling this function */ @@ -5427,9 +5426,7 @@ static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev) static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev) { dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev); - if (!dev->mdev->priv.uar) - return -ENOMEM; - return 0; + return PTR_ERR_OR_ZERO(dev->mdev->priv.uar); } static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev) diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 1520a2f20f98..90a9c461cedc 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -866,25 +866,28 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length, int *order) { struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct ib_umem *u; int err; - *umem = ib_umem_get(pd->uobject->context, start, length, - access_flags, 0); - err = PTR_ERR_OR_ZERO(*umem); + *umem = NULL; + + u = ib_umem_get(pd->uobject->context, start, length, access_flags, 0); + err = PTR_ERR_OR_ZERO(u); if (err) { - *umem = NULL; - mlx5_ib_err(dev, "umem get failed (%d)\n", err); + mlx5_ib_dbg(dev, "umem get failed (%d)\n", err); return err; } - mlx5_ib_cont_pages(*umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages, + mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages, page_shift, ncont, order); if (!*npages) { mlx5_ib_warn(dev, "avoid zero region\n"); - ib_umem_release(*umem); + ib_umem_release(u); return -EINVAL; } + *umem = u; + mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n", *npages, *ncont, *order, *page_shift); @@ -1458,13 +1461,12 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, int access_flags = flags & IB_MR_REREG_ACCESS ? new_access_flags : mr->access_flags; - u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address; - u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length; int page_shift = 0; int upd_flags = 0; int npages = 0; int ncont = 0; int order = 0; + u64 addr, len; int err; mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n", @@ -1472,6 +1474,17 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, atomic_sub(mr->npages, &dev->mdev->priv.reg_pages); + if (!mr->umem) + return -EINVAL; + + if (flags & IB_MR_REREG_TRANS) { + addr = virt_addr; + len = length; + } else { + addr = mr->umem->address; + len = mr->umem->length; + } + if (flags != IB_MR_REREG_PD) { /* * Replace umem. 
This needs to be done whether or not UMR is @@ -1479,6 +1492,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, */ flags |= IB_MR_REREG_TRANS; ib_umem_release(mr->umem); + mr->umem = NULL; err = mr_umem_get(pd, addr, len, access_flags, &mr->umem, &npages, &page_shift, &ncont, &order); if (err) diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 7ed4b70f6447..87b7c1be2a11 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -259,7 +259,11 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, } else { if (ucmd) { qp->rq.wqe_cnt = ucmd->rq_wqe_count; + if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift)) + return -EINVAL; qp->rq.wqe_shift = ucmd->rq_wqe_shift; + if ((1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) < qp->wq_sig) + return -EINVAL; qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig; qp->rq.max_post = qp->rq.wqe_cnt; } else { @@ -2451,18 +2455,18 @@ enum { static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate) { - if (rate == IB_RATE_PORT_CURRENT) { + if (rate == IB_RATE_PORT_CURRENT) return 0; - } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) { + + if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) return -EINVAL; - } else { - while (rate != IB_RATE_2_5_GBPS && - !(1 << (rate + MLX5_STAT_RATE_OFFSET) & - MLX5_CAP_GEN(dev->mdev, stat_rate_support))) - --rate; - } - return rate + MLX5_STAT_RATE_OFFSET; + while (rate != IB_RATE_PORT_CURRENT && + !(1 << (rate + MLX5_STAT_RATE_OFFSET) & + MLX5_CAP_GEN(dev->mdev, stat_rate_support))) + --rate; + + return rate ? rate + MLX5_STAT_RATE_OFFSET : rate; } static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev, diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 0a75164cedea..007d5e8a0121 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c @@ -461,7 +461,7 @@ static bool nes_nic_send(struct sk_buff *skb, struct net_device *netdev) /** * nes_netdev_start_xmit */ -static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c index 61927c165b59..4cf11063e0b5 100644 --- a/drivers/infiniband/sw/rxe/rxe_opcode.c +++ b/drivers/infiniband/sw/rxe/rxe_opcode.c @@ -390,7 +390,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { .name = "IB_OPCODE_RC_SEND_ONLY_INV", .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK - | RXE_END_MASK, + | RXE_END_MASK | RXE_START_MASK, .length = RXE_BTH_BYTES + RXE_IETH_BYTES, .offset = { [RXE_BTH] = 0, diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index 7bdaf71b8221..785199990457 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c @@ -728,7 +728,6 @@ next_wqe: rollback_state(wqe, qp, &rollback_wqe, rollback_psn); if (ret == -EAGAIN) { - kfree_skb(skb); rxe_run_task(&qp->req.task, 1); goto exit; } diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index a65c9969f7fc..955ff3b6da9c 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ 
-742,7 +742,6 @@ static enum resp_states read_reply(struct rxe_qp *qp, err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb); if (err) { pr_err("Failed sending RDMA reply.\n"); - kfree_skb(skb); return RESPST_ERR_RNR; } @@ -954,10 +953,8 @@ static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, } err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb); - if (err) { + if (err) pr_err_ratelimited("Failed sending ack\n"); - kfree_skb(skb); - } err1: return err; @@ -1141,7 +1138,6 @@ static enum resp_states duplicate_request(struct rxe_qp *qp, if (rc) { pr_err("Failed resending result. This flow is not handled - skb ignored\n"); rxe_drop_ref(qp); - kfree_skb(skb_copy); rc = RESPST_CLEANUP; goto out; } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 161ba8c76285..cf291f90b58f 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1094,7 +1094,7 @@ drop_and_unlock: spin_unlock_irqrestore(&priv->lock, flags); } -static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ipoib_dev_priv *priv = ipoib_priv(dev); struct rdma_netdev *rn = netdev_priv(dev); diff --git a/drivers/infiniband/ulp/srp/Kconfig b/drivers/infiniband/ulp/srp/Kconfig index c74ee9633041..99db8fe5173a 100644 --- a/drivers/infiniband/ulp/srp/Kconfig +++ b/drivers/infiniband/ulp/srp/Kconfig @@ -1,6 +1,6 @@ config INFINIBAND_SRP tristate "InfiniBand SCSI RDMA Protocol" - depends on SCSI + depends on SCSI && INFINIBAND_ADDR_TRANS select SCSI_SRP_ATTRS ---help--- Support for the SCSI RDMA Protocol over InfiniBand. This diff --git a/drivers/infiniband/ulp/srpt/Kconfig b/drivers/infiniband/ulp/srpt/Kconfig index 31ee83d528d9..fb8b7182f05e 100644 --- a/drivers/infiniband/ulp/srpt/Kconfig +++ b/drivers/infiniband/ulp/srpt/Kconfig @@ -1,6 +1,6 @@ config INFINIBAND_SRPT tristate "InfiniBand SCSI RDMA Protocol target support" - depends on INFINIBAND && TARGET_CORE + depends on INFINIBAND && INFINIBAND_ADDR_TRANS && TARGET_CORE ---help--- Support for the SCSI RDMA Protocol (SRP) Target driver. 
The diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index 46115a392098..c81c79d01d93 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -31,6 +31,7 @@ enum evdev_clock_type { EV_CLK_REAL = 0, EV_CLK_MONO, + EV_CLK_BOOT, EV_CLK_MAX }; @@ -197,10 +198,12 @@ static int evdev_set_clk_type(struct evdev_client *client, unsigned int clkid) case CLOCK_REALTIME: clk_type = EV_CLK_REAL; break; - case CLOCK_BOOTTIME: case CLOCK_MONOTONIC: clk_type = EV_CLK_MONO; break; + case CLOCK_BOOTTIME: + clk_type = EV_CLK_BOOT; + break; default: return -EINVAL; } @@ -311,6 +314,8 @@ static void evdev_events(struct input_handle *handle, ev_time[EV_CLK_MONO] = ktime_get(); ev_time[EV_CLK_REAL] = ktime_mono_to_real(ev_time[EV_CLK_MONO]); + ev_time[EV_CLK_BOOT] = ktime_mono_to_any(ev_time[EV_CLK_MONO], + TK_OFFS_BOOT); rcu_read_lock(); diff --git a/drivers/input/input-leds.c b/drivers/input/input-leds.c index 766bf2660116..5f04b2d94635 100644 --- a/drivers/input/input-leds.c +++ b/drivers/input/input-leds.c @@ -88,6 +88,7 @@ static int input_leds_connect(struct input_handler *handler, const struct input_device_id *id) { struct input_leds *leds; + struct input_led *led; unsigned int num_leds; unsigned int led_code; int led_no; @@ -119,14 +120,13 @@ static int input_leds_connect(struct input_handler *handler, led_no = 0; for_each_set_bit(led_code, dev->ledbit, LED_CNT) { - struct input_led *led = &leds->leds[led_no]; + if (!input_led_info[led_code].name) + continue; + led = &leds->leds[led_no]; led->handle = &leds->handle; led->code = led_code; - if (!input_led_info[led_code].name) - continue; - led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s", dev_name(&dev->dev), input_led_info[led_code].name); diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 0a67f235ba88..38f9501acdf0 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -583,7 +583,7 @@ static void alps_process_trackstick_packet_v3(struct psmouse *psmouse) x = (s8)(((packet[0] & 0x20) << 2) | (packet[1] & 0x7f)); y = (s8)(((packet[0] & 0x10) << 3) | (packet[2] & 0x7f)); - z = packet[4] & 0x7c; + z = packet[4] & 0x7f; /* * The x and y values tend to be quite large, and when used diff --git a/drivers/input/rmi4/rmi_spi.c b/drivers/input/rmi4/rmi_spi.c index 76edbf2c1bce..082defc329a8 100644 --- a/drivers/input/rmi4/rmi_spi.c +++ b/drivers/input/rmi4/rmi_spi.c @@ -147,8 +147,11 @@ static int rmi_spi_xfer(struct rmi_spi_xport *rmi_spi, if (len > RMI_SPI_XFER_SIZE_LIMIT) return -EINVAL; - if (rmi_spi->xfer_buf_size < len) - rmi_spi_manage_pools(rmi_spi, len); + if (rmi_spi->xfer_buf_size < len) { + ret = rmi_spi_manage_pools(rmi_spi, len); + if (ret < 0) + return ret; + } if (addr == 0) /* diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index 4f15496fec8b..3e613afa10b4 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -362,7 +362,7 @@ config TOUCHSCREEN_HIDEEP If unsure, say N. - To compile this driver as a moudle, choose M here : the + To compile this driver as a module, choose M here : the module will be called hideep_ts. 
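The evdev change earlier in this chunk stops aliasing CLOCK_BOOTTIME to the monotonic clock and gives it its own timestamp slot; a compact sketch of the resulting mapping (the demo_* names are illustrative, the ktime helpers are the real kernel ones):

#include <linux/ktime.h>
#include <linux/timekeeping.h>
#include <linux/time.h>
#include <linux/errno.h>

enum demo_clk { DEMO_CLK_REAL, DEMO_CLK_MONO, DEMO_CLK_BOOT, DEMO_CLK_MAX };

static int demo_clkid_to_type(clockid_t clkid)
{
        switch (clkid) {
        case CLOCK_REALTIME:
                return DEMO_CLK_REAL;
        case CLOCK_MONOTONIC:
                return DEMO_CLK_MONO;
        case CLOCK_BOOTTIME:
                return DEMO_CLK_BOOT;   /* no longer folded into MONO */
        default:
                return -EINVAL;
        }
}

static void demo_fill_timestamps(ktime_t times[DEMO_CLK_MAX])
{
        times[DEMO_CLK_MONO] = ktime_get();
        times[DEMO_CLK_REAL] = ktime_mono_to_real(times[DEMO_CLK_MONO]);
        times[DEMO_CLK_BOOT] = ktime_mono_to_any(times[DEMO_CLK_MONO],
                                                 TK_OFFS_BOOT);
}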
config TOUCHSCREEN_ILI210X diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index 5d9699fe1b55..09194721aed2 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c @@ -280,7 +280,8 @@ struct mxt_data { struct input_dev *input_dev; char phys[64]; /* device physical location */ struct mxt_object *object_table; - struct mxt_info info; + struct mxt_info *info; + void *raw_info_block; unsigned int irq; unsigned int max_x; unsigned int max_y; @@ -460,12 +461,13 @@ static int mxt_lookup_bootloader_address(struct mxt_data *data, bool retry) { u8 appmode = data->client->addr; u8 bootloader; + u8 family_id = data->info ? data->info->family_id : 0; switch (appmode) { case 0x4a: case 0x4b: /* Chips after 1664S use different scheme */ - if (retry || data->info.family_id >= 0xa2) { + if (retry || family_id >= 0xa2) { bootloader = appmode - 0x24; break; } @@ -692,7 +694,7 @@ mxt_get_object(struct mxt_data *data, u8 type) struct mxt_object *object; int i; - for (i = 0; i < data->info.object_num; i++) { + for (i = 0; i < data->info->object_num; i++) { object = data->object_table + i; if (object->type == type) return object; @@ -1462,12 +1464,12 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg) data_pos += offset; } - if (cfg_info.family_id != data->info.family_id) { + if (cfg_info.family_id != data->info->family_id) { dev_err(dev, "Family ID mismatch!\n"); return -EINVAL; } - if (cfg_info.variant_id != data->info.variant_id) { + if (cfg_info.variant_id != data->info->variant_id) { dev_err(dev, "Variant ID mismatch!\n"); return -EINVAL; } @@ -1512,7 +1514,7 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg) /* Malloc memory to store configuration */ cfg_start_ofs = MXT_OBJECT_START + - data->info.object_num * sizeof(struct mxt_object) + + data->info->object_num * sizeof(struct mxt_object) + MXT_INFO_CHECKSUM_SIZE; config_mem_size = data->mem_size - cfg_start_ofs; config_mem = kzalloc(config_mem_size, GFP_KERNEL); @@ -1563,20 +1565,6 @@ release_mem: return ret; } -static int mxt_get_info(struct mxt_data *data) -{ - struct i2c_client *client = data->client; - struct mxt_info *info = &data->info; - int error; - - /* Read 7-byte info block starting at address 0 */ - error = __mxt_read_reg(client, 0, sizeof(*info), info); - if (error) - return error; - - return 0; -} - static void mxt_free_input_device(struct mxt_data *data) { if (data->input_dev) { @@ -1591,9 +1579,10 @@ static void mxt_free_object_table(struct mxt_data *data) video_unregister_device(&data->dbg.vdev); v4l2_device_unregister(&data->dbg.v4l2); #endif - - kfree(data->object_table); data->object_table = NULL; + data->info = NULL; + kfree(data->raw_info_block); + data->raw_info_block = NULL; kfree(data->msg_buf); data->msg_buf = NULL; data->T5_address = 0; @@ -1609,34 +1598,18 @@ static void mxt_free_object_table(struct mxt_data *data) data->max_reportid = 0; } -static int mxt_get_object_table(struct mxt_data *data) +static int mxt_parse_object_table(struct mxt_data *data, + struct mxt_object *object_table) { struct i2c_client *client = data->client; - size_t table_size; - struct mxt_object *object_table; - int error; int i; u8 reportid; u16 end_address; - table_size = data->info.object_num * sizeof(struct mxt_object); - object_table = kzalloc(table_size, GFP_KERNEL); - if (!object_table) { - dev_err(&data->client->dev, "Failed to allocate memory\n"); - return -ENOMEM; - } - - error = 
__mxt_read_reg(client, MXT_OBJECT_START, table_size, - object_table); - if (error) { - kfree(object_table); - return error; - } - /* Valid Report IDs start counting from 1 */ reportid = 1; data->mem_size = 0; - for (i = 0; i < data->info.object_num; i++) { + for (i = 0; i < data->info->object_num; i++) { struct mxt_object *object = object_table + i; u8 min_id, max_id; @@ -1660,8 +1633,8 @@ static int mxt_get_object_table(struct mxt_data *data) switch (object->type) { case MXT_GEN_MESSAGE_T5: - if (data->info.family_id == 0x80 && - data->info.version < 0x20) { + if (data->info->family_id == 0x80 && + data->info->version < 0x20) { /* * On mXT224 firmware versions prior to V2.0 * read and discard unused CRC byte otherwise @@ -1716,24 +1689,102 @@ static int mxt_get_object_table(struct mxt_data *data) /* If T44 exists, T5 position has to be directly after */ if (data->T44_address && (data->T5_address != data->T44_address + 1)) { dev_err(&client->dev, "Invalid T44 position\n"); - error = -EINVAL; - goto free_object_table; + return -EINVAL; } data->msg_buf = kcalloc(data->max_reportid, data->T5_msg_size, GFP_KERNEL); - if (!data->msg_buf) { - dev_err(&client->dev, "Failed to allocate message buffer\n"); + if (!data->msg_buf) + return -ENOMEM; + + return 0; +} + +static int mxt_read_info_block(struct mxt_data *data) +{ + struct i2c_client *client = data->client; + int error; + size_t size; + void *id_buf, *buf; + uint8_t num_objects; + u32 calculated_crc; + u8 *crc_ptr; + + /* If info block already allocated, free it */ + if (data->raw_info_block) + mxt_free_object_table(data); + + /* Read 7-byte ID information block starting at address 0 */ + size = sizeof(struct mxt_info); + id_buf = kzalloc(size, GFP_KERNEL); + if (!id_buf) + return -ENOMEM; + + error = __mxt_read_reg(client, 0, size, id_buf); + if (error) + goto err_free_mem; + + /* Resize buffer to give space for rest of info block */ + num_objects = ((struct mxt_info *)id_buf)->object_num; + size += (num_objects * sizeof(struct mxt_object)) + + MXT_INFO_CHECKSUM_SIZE; + + buf = krealloc(id_buf, size, GFP_KERNEL); + if (!buf) { error = -ENOMEM; - goto free_object_table; + goto err_free_mem; + } + id_buf = buf; + + /* Read rest of info block */ + error = __mxt_read_reg(client, MXT_OBJECT_START, + size - MXT_OBJECT_START, + id_buf + MXT_OBJECT_START); + if (error) + goto err_free_mem; + + /* Extract & calculate checksum */ + crc_ptr = id_buf + size - MXT_INFO_CHECKSUM_SIZE; + data->info_crc = crc_ptr[0] | (crc_ptr[1] << 8) | (crc_ptr[2] << 16); + + calculated_crc = mxt_calculate_crc(id_buf, 0, + size - MXT_INFO_CHECKSUM_SIZE); + + /* + * CRC mismatch can be caused by data corruption due to I2C comms + * issue or else device is not using Object Based Protocol (eg i2c-hid) + */ + if ((data->info_crc == 0) || (data->info_crc != calculated_crc)) { + dev_err(&client->dev, + "Info Block CRC error calculated=0x%06X read=0x%06X\n", + calculated_crc, data->info_crc); + error = -EIO; + goto err_free_mem; + } + + data->raw_info_block = id_buf; + data->info = (struct mxt_info *)id_buf; + + dev_info(&client->dev, + "Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n", + data->info->family_id, data->info->variant_id, + data->info->version >> 4, data->info->version & 0xf, + data->info->build, data->info->object_num); + + /* Parse object table information */ + error = mxt_parse_object_table(data, id_buf + MXT_OBJECT_START); + if (error) { + dev_err(&client->dev, "Error %d parsing object table\n", error); + mxt_free_object_table(data); + goto 
err_free_mem; } - data->object_table = object_table; + data->object_table = (struct mxt_object *)(id_buf + MXT_OBJECT_START); return 0; -free_object_table: - mxt_free_object_table(data); +err_free_mem: + kfree(id_buf); return error; } @@ -2046,7 +2097,7 @@ static int mxt_initialize(struct mxt_data *data) int error; while (1) { - error = mxt_get_info(data); + error = mxt_read_info_block(data); if (!error) break; @@ -2077,16 +2128,9 @@ static int mxt_initialize(struct mxt_data *data) msleep(MXT_FW_RESET_TIME); } - /* Get object table information */ - error = mxt_get_object_table(data); - if (error) { - dev_err(&client->dev, "Error %d reading object table\n", error); - return error; - } - error = mxt_acquire_irq(data); if (error) - goto err_free_object_table; + return error; error = request_firmware_nowait(THIS_MODULE, true, MXT_CFG_NAME, &client->dev, GFP_KERNEL, data, @@ -2094,14 +2138,10 @@ static int mxt_initialize(struct mxt_data *data) if (error) { dev_err(&client->dev, "Failed to invoke firmware loader: %d\n", error); - goto err_free_object_table; + return error; } return 0; - -err_free_object_table: - mxt_free_object_table(data); - return error; } static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep) @@ -2162,7 +2202,7 @@ recheck: static u16 mxt_get_debug_value(struct mxt_data *data, unsigned int x, unsigned int y) { - struct mxt_info *info = &data->info; + struct mxt_info *info = data->info; struct mxt_dbg *dbg = &data->dbg; unsigned int ofs, page; unsigned int col = 0; @@ -2490,7 +2530,7 @@ static const struct video_device mxt_video_device = { static void mxt_debug_init(struct mxt_data *data) { - struct mxt_info *info = &data->info; + struct mxt_info *info = data->info; struct mxt_dbg *dbg = &data->dbg; struct mxt_object *object; int error; @@ -2576,7 +2616,6 @@ static int mxt_configure_objects(struct mxt_data *data, const struct firmware *cfg) { struct device *dev = &data->client->dev; - struct mxt_info *info = &data->info; int error; error = mxt_init_t7_power_cfg(data); @@ -2601,11 +2640,6 @@ static int mxt_configure_objects(struct mxt_data *data, mxt_debug_init(data); - dev_info(dev, - "Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n", - info->family_id, info->variant_id, info->version >> 4, - info->version & 0xf, info->build, info->object_num); - return 0; } @@ -2614,7 +2648,7 @@ static ssize_t mxt_fw_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mxt_data *data = dev_get_drvdata(dev); - struct mxt_info *info = &data->info; + struct mxt_info *info = data->info; return scnprintf(buf, PAGE_SIZE, "%u.%u.%02X\n", info->version >> 4, info->version & 0xf, info->build); } @@ -2624,7 +2658,7 @@ static ssize_t mxt_hw_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mxt_data *data = dev_get_drvdata(dev); - struct mxt_info *info = &data->info; + struct mxt_info *info = data->info; return scnprintf(buf, PAGE_SIZE, "%u.%u\n", info->family_id, info->variant_id); } @@ -2663,7 +2697,7 @@ static ssize_t mxt_object_show(struct device *dev, return -ENOMEM; error = 0; - for (i = 0; i < data->info.object_num; i++) { + for (i = 0; i < data->info->object_num; i++) { object = data->object_table + i; if (!mxt_object_readable(object->type)) @@ -3035,6 +3069,15 @@ static const struct dmi_system_id mxt_dmi_table[] = { .driver_data = samus_platform_data, }, { + /* Samsung Chromebook Pro */ + .ident = "Samsung Chromebook Pro", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Google"), + DMI_MATCH(DMI_PRODUCT_NAME, 
"Caroline"), + }, + .driver_data = samus_platform_data, + }, + { /* Other Google Chromebooks */ .ident = "Chromebook", .matches = { @@ -3254,6 +3297,11 @@ static SIMPLE_DEV_PM_OPS(mxt_pm_ops, mxt_suspend, mxt_resume); static const struct of_device_id mxt_of_match[] = { { .compatible = "atmel,maxtouch", }, + /* Compatibles listed below are deprecated */ + { .compatible = "atmel,qt602240_ts", }, + { .compatible = "atmel,atmel_mxt_ts", }, + { .compatible = "atmel,atmel_mxt_tp", }, + { .compatible = "atmel,mXT224", }, {}, }; MODULE_DEVICE_TABLE(of, mxt_of_match); diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 2a99f0f14795..8fb8c737fffe 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -83,7 +83,6 @@ static DEFINE_SPINLOCK(amd_iommu_devtable_lock); static DEFINE_SPINLOCK(pd_bitmap_lock); -static DEFINE_SPINLOCK(iommu_table_lock); /* List of all available dev_data structures */ static LLIST_HEAD(dev_data_list); @@ -3562,6 +3561,7 @@ EXPORT_SYMBOL(amd_iommu_device_info); *****************************************************************************/ static struct irq_chip amd_ir_chip; +static DEFINE_SPINLOCK(iommu_table_lock); static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table) { diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index f05f3cf90756..ddcbbdb5d658 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -167,40 +167,16 @@ EXPORT_SYMBOL(iommu_put_dma_cookie); * @list: Reserved region list from iommu_get_resv_regions() * * IOMMU drivers can use this to implement their .get_resv_regions callback - * for general non-IOMMU-specific reservations. Currently, this covers host - * bridge windows for PCI devices and GICv3 ITS region reservation on ACPI - * based ARM platforms that may require HW MSI reservation. + * for general non-IOMMU-specific reservations. Currently, this covers GICv3 + * ITS region reservation on ACPI based ARM platforms that may require HW MSI + * reservation. 
*/ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list) { - struct pci_host_bridge *bridge; - struct resource_entry *window; - - if (!is_of_node(dev->iommu_fwspec->iommu_fwnode) && - iort_iommu_msi_get_resv_regions(dev, list) < 0) - return; - - if (!dev_is_pci(dev)) - return; - - bridge = pci_find_host_bridge(to_pci_dev(dev)->bus); - resource_list_for_each_entry(window, &bridge->windows) { - struct iommu_resv_region *region; - phys_addr_t start; - size_t length; - - if (resource_type(window->res) != IORESOURCE_MEM) - continue; - start = window->res->start - window->offset; - length = window->res->end - window->res->start + 1; - region = iommu_alloc_resv_region(start, length, 0, - IOMMU_RESV_RESERVED); - if (!region) - return; + if (!is_of_node(dev->iommu_fwspec->iommu_fwnode)) + iort_iommu_msi_get_resv_regions(dev, list); - list_add_tail(®ion->list, list); - } } EXPORT_SYMBOL(iommu_dma_get_resv_regions); @@ -229,6 +205,23 @@ static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie, return 0; } +static void iova_reserve_pci_windows(struct pci_dev *dev, + struct iova_domain *iovad) +{ + struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); + struct resource_entry *window; + unsigned long lo, hi; + + resource_list_for_each_entry(window, &bridge->windows) { + if (resource_type(window->res) != IORESOURCE_MEM) + continue; + + lo = iova_pfn(iovad, window->res->start - window->offset); + hi = iova_pfn(iovad, window->res->end - window->offset); + reserve_iova(iovad, lo, hi); + } +} + static int iova_reserve_iommu_regions(struct device *dev, struct iommu_domain *domain) { @@ -238,6 +231,9 @@ static int iova_reserve_iommu_regions(struct device *dev, LIST_HEAD(resv_regions); int ret = 0; + if (dev_is_pci(dev)) + iova_reserve_pci_windows(to_pci_dev(dev), iovad); + iommu_get_resv_regions(dev, &resv_regions); list_for_each_entry(region, &resv_regions, list) { unsigned long lo, hi; diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index accf58388bdb..460bed4fc5b1 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -1345,7 +1345,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep, struct qi_desc desc; if (mask) { - BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1)); + WARN_ON_ONCE(addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1)); addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1; desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE; } else diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 66f69af2c219..3062a154a9fb 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c @@ -1136,7 +1136,7 @@ static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force) irte->dest_id = IRTE_DEST(cfg->dest_apicid); /* Update the hardware only if the interrupt is in remapped mode. 
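The iommu_dma_get_resv_regions() helper documented above is meant to be called from an IOMMU driver's .get_resv_regions callback; a hedged sketch of how such a callback is typically wired up (the demo_* names and the MSI window base/size are made-up example values, while the iommu_* calls are existing kernel APIs of this era):

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/dma-iommu.h>
#include <linux/list.h>

#define DEMO_MSI_BASE   0x08000000UL
#define DEMO_MSI_SIZE   0x00100000UL

static void demo_iommu_get_resv_regions(struct device *dev,
                                        struct list_head *head)
{
        struct iommu_resv_region *region;
        int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

        /* driver-specific software-managed MSI window */
        region = iommu_alloc_resv_region(DEMO_MSI_BASE, DEMO_MSI_SIZE,
                                         prot, IOMMU_RESV_SW_MSI);
        if (!region)
                return;
        list_add_tail(&region->list, head);

        /* then append the generic, firmware-described reservations */
        iommu_dma_get_resv_regions(dev, head);
}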
*/ - if (!force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING) + if (force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING) modify_irte(&ir_data->irq_2_iommu, irte); } diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 5fc8656c60f9..0468acfa131f 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -1098,7 +1098,7 @@ static int rk_iommu_of_xlate(struct device *dev, data->iommu = platform_get_drvdata(iommu_dev); dev->archdata.iommu = data; - of_dev_put(iommu_dev); + platform_device_put(iommu_dev); return 0; } @@ -1175,8 +1175,15 @@ static int rk_iommu_probe(struct platform_device *pdev) for (i = 0; i < iommu->num_clocks; ++i) iommu->clocks[i].id = rk_iommu_clocks[i]; + /* + * iommu clocks should be present for all new devices and devicetrees + * but there are older devicetrees without clocks out in the wild. + * So clocks as optional for the time being. + */ err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks); - if (err) + if (err == -ENOENT) + iommu->num_clocks = 0; + else if (err) return err; err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks); diff --git a/drivers/irqchip/qcom-irq-combiner.c b/drivers/irqchip/qcom-irq-combiner.c index f31265937439..7f0c0be322e0 100644 --- a/drivers/irqchip/qcom-irq-combiner.c +++ b/drivers/irqchip/qcom-irq-combiner.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -68,7 +68,7 @@ static void combiner_handle_irq(struct irq_desc *desc) bit = readl_relaxed(combiner->regs[reg].addr); status = bit & combiner->regs[reg].enabled; - if (!status) + if (bit && !status) pr_warn_ratelimited("Unexpected IRQ on CPU%d: (%08x %08lx %p)\n", smp_processor_id(), bit, combiner->regs[reg].enabled, diff --git a/drivers/isdn/mISDN/dsp_hwec.c b/drivers/isdn/mISDN/dsp_hwec.c index a6e87076acc2..5336bbdbfdc5 100644 --- a/drivers/isdn/mISDN/dsp_hwec.c +++ b/drivers/isdn/mISDN/dsp_hwec.c @@ -68,12 +68,12 @@ void dsp_hwec_enable(struct dsp *dsp, const char *arg) goto _do; { - char _dup[len + 1]; char *dup, *tok, *name, *val; int tmp; - strcpy(_dup, arg); - dup = _dup; + dup = kstrdup(arg, GFP_ATOMIC); + if (!dup) + return; while ((tok = strsep(&dup, ","))) { if (!strlen(tok)) @@ -89,6 +89,8 @@ void dsp_hwec_enable(struct dsp *dsp, const char *arg) deftaps = tmp; } } + + kfree(dup); } _do: diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c index 21d50e4cc5e1..b05022f94f18 100644 --- a/drivers/isdn/mISDN/l1oip_core.c +++ b/drivers/isdn/mISDN/l1oip_core.c @@ -279,7 +279,7 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask, u16 timebase, u8 *buf, int len) { u8 *p; - u8 frame[len + 32]; + u8 frame[MAX_DFRAME_LEN_L1 + 32]; struct socket *socket = NULL; if (debug & DEBUG_L1OIP_MSG) @@ -902,7 +902,11 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb) p = skb->data; l = skb->len; while (l) { - ll = (l < L1OIP_MAX_PERFRAME) ? l : L1OIP_MAX_PERFRAME; + /* + * This is technically bounded by L1OIP_MAX_PERFRAME but + * MAX_DFRAME_LEN_L1 < L1OIP_MAX_PERFRAME + */ + ll = (l < MAX_DFRAME_LEN_L1) ? 
l : MAX_DFRAME_LEN_L1; l1oip_socket_send(hc, 0, dch->slot, 0, hc->chan[dch->slot].tx_counter++, p, ll); p += ll; @@ -1140,7 +1144,11 @@ handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb) p = skb->data; l = skb->len; while (l) { - ll = (l < L1OIP_MAX_PERFRAME) ? l : L1OIP_MAX_PERFRAME; + /* + * This is technically bounded by L1OIP_MAX_PERFRAME but + * MAX_DFRAME_LEN_L1 < L1OIP_MAX_PERFRAME + */ + ll = (l < MAX_DFRAME_LEN_L1) ? l : MAX_DFRAME_LEN_L1; l1oip_socket_send(hc, hc->codec, bch->slot, 0, hc->chan[bch->slot].tx_counter, p, ll); hc->chan[bch->slot].tx_counter += ll; diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 004cc3cc6123..7fa2631b422c 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -290,7 +290,7 @@ do { \ if (kthread_should_stop() || \ test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)) { \ set_current_state(TASK_RUNNING); \ - return 0; \ + goto out; \ } \ \ schedule(); \ @@ -378,6 +378,9 @@ retry_invalidate: bch_prio_write(ca); } } +out: + wait_for_kthread_stop(); + return 0; } /* Allocation */ diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index d338b7086013..3a0cfb237af9 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -392,6 +392,8 @@ struct cached_dev { #define DEFAULT_CACHED_DEV_ERROR_LIMIT 64 atomic_t io_errors; unsigned error_limit; + + char backing_dev_name[BDEVNAME_SIZE]; }; enum alloc_reserve { @@ -464,6 +466,8 @@ struct cache { atomic_long_t meta_sectors_written; atomic_long_t btree_sectors_written; atomic_long_t sectors_written; + + char cache_dev_name[BDEVNAME_SIZE]; }; struct gc_stat { diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index 028f7b386e01..4e63c6f6c04d 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c @@ -106,7 +106,6 @@ void bch_btree_verify(struct btree *b) void bch_data_verify(struct cached_dev *dc, struct bio *bio) { - char name[BDEVNAME_SIZE]; struct bio *check; struct bio_vec bv, cbv; struct bvec_iter iter, citer = { 0 }; @@ -134,7 +133,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio) bv.bv_len), dc->disk.c, "verify failed at dev %s sector %llu", - bdevname(dc->bdev, name), + dc->backing_dev_name, (uint64_t) bio->bi_iter.bi_sector); kunmap_atomic(p1); diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index 7fac97ae036e..2ddf8515e6a5 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c @@ -52,7 +52,6 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c, /* IO errors */ void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio) { - char buf[BDEVNAME_SIZE]; unsigned errors; WARN_ONCE(!dc, "NULL pointer of struct cached_dev"); @@ -60,7 +59,7 @@ void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio) errors = atomic_add_return(1, &dc->io_errors); if (errors < dc->error_limit) pr_err("%s: IO error on backing device, unrecoverable", - bio_devname(bio, buf)); + dc->backing_dev_name); else bch_cached_dev_error(dc); } @@ -105,19 +104,18 @@ void bch_count_io_errors(struct cache *ca, } if (error) { - char buf[BDEVNAME_SIZE]; unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT, &ca->io_errors); errors >>= IO_ERROR_SHIFT; if (errors < ca->set->error_limit) pr_err("%s: IO error on %s%s", - bdevname(ca->bdev, buf), m, + ca->cache_dev_name, m, is_read ? ", recovering." 
: "."); else bch_cache_set_error(ca->set, "%s: too many IO errors %s", - bdevname(ca->bdev, buf), m); + ca->cache_dev_name, m); } } diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index a65e3365eeb9..8e3e8655ed63 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -649,11 +649,8 @@ static void backing_request_endio(struct bio *bio) */ if (unlikely(s->iop.writeback && bio->bi_opf & REQ_PREFLUSH)) { - char buf[BDEVNAME_SIZE]; - - bio_devname(bio, buf); pr_err("Can't flush %s: returned bi_status %i", - buf, bio->bi_status); + dc->backing_dev_name, bio->bi_status); } else { /* set to orig_bio->bi_status in bio_complete() */ s->iop.status = bio->bi_status; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index d90d9e59ca00..3dea06b41d43 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -936,7 +936,6 @@ static void cancel_writeback_rate_update_dwork(struct cached_dev *dc) static void cached_dev_detach_finish(struct work_struct *w) { struct cached_dev *dc = container_of(w, struct cached_dev, detach); - char buf[BDEVNAME_SIZE]; struct closure cl; closure_init_stack(&cl); @@ -967,7 +966,7 @@ static void cached_dev_detach_finish(struct work_struct *w) mutex_unlock(&bch_register_lock); - pr_info("Caching disabled for %s", bdevname(dc->bdev, buf)); + pr_info("Caching disabled for %s", dc->backing_dev_name); /* Drop ref we took in cached_dev_detach() */ closure_put(&dc->disk.cl); @@ -999,29 +998,28 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, { uint32_t rtime = cpu_to_le32(get_seconds()); struct uuid_entry *u; - char buf[BDEVNAME_SIZE]; struct cached_dev *exist_dc, *t; - bdevname(dc->bdev, buf); - if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) || (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))) return -ENOENT; if (dc->disk.c) { - pr_err("Can't attach %s: already attached", buf); + pr_err("Can't attach %s: already attached", + dc->backing_dev_name); return -EINVAL; } if (test_bit(CACHE_SET_STOPPING, &c->flags)) { - pr_err("Can't attach %s: shutting down", buf); + pr_err("Can't attach %s: shutting down", + dc->backing_dev_name); return -EINVAL; } if (dc->sb.block_size < c->sb.block_size) { /* Will die */ pr_err("Couldn't attach %s: block size less than set's block size", - buf); + dc->backing_dev_name); return -EINVAL; } @@ -1029,7 +1027,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) { if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) { pr_err("Tried to attach %s but duplicate UUID already attached", - buf); + dc->backing_dev_name); return -EINVAL; } @@ -1047,13 +1045,15 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, if (!u) { if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { - pr_err("Couldn't find uuid for %s in set", buf); + pr_err("Couldn't find uuid for %s in set", + dc->backing_dev_name); return -ENOENT; } u = uuid_find_empty(c); if (!u) { - pr_err("Not caching %s, no room for UUID", buf); + pr_err("Not caching %s, no room for UUID", + dc->backing_dev_name); return -EINVAL; } } @@ -1112,7 +1112,8 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, up_write(&dc->writeback_lock); pr_info("Caching %s as %s on set %pU", - bdevname(dc->bdev, buf), dc->disk.disk->disk_name, + dc->backing_dev_name, + dc->disk.disk->disk_name, dc->disk.c->sb.set_uuid); return 0; } @@ -1225,10 +1226,10 @@ static void register_bdev(struct cache_sb *sb, struct 
page *sb_page, struct block_device *bdev, struct cached_dev *dc) { - char name[BDEVNAME_SIZE]; const char *err = "cannot allocate memory"; struct cache_set *c; + bdevname(bdev, dc->backing_dev_name); memcpy(&dc->sb, sb, sizeof(struct cache_sb)); dc->bdev = bdev; dc->bdev->bd_holder = dc; @@ -1237,6 +1238,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page, bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page; get_page(sb_page); + if (cached_dev_init(dc, sb->block_size << 9)) goto err; @@ -1247,7 +1249,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page, if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj)) goto err; - pr_info("registered backing device %s", bdevname(bdev, name)); + pr_info("registered backing device %s", dc->backing_dev_name); list_add(&dc->list, &uncached_devices); list_for_each_entry(c, &bch_cache_sets, list) @@ -1259,7 +1261,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page, return; err: - pr_notice("error %s: %s", bdevname(bdev, name), err); + pr_notice("error %s: %s", dc->backing_dev_name, err); bcache_device_stop(&dc->disk); } @@ -1367,7 +1369,7 @@ int bch_flash_dev_create(struct cache_set *c, uint64_t size) bool bch_cached_dev_error(struct cached_dev *dc) { - char name[BDEVNAME_SIZE]; + struct cache_set *c; if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags)) return false; @@ -1377,7 +1379,22 @@ bool bch_cached_dev_error(struct cached_dev *dc) smp_mb(); pr_err("stop %s: too many IO errors on backing device %s\n", - dc->disk.disk->disk_name, bdevname(dc->bdev, name)); + dc->disk.disk->disk_name, dc->backing_dev_name); + + /* + * If the cached device is still attached to a cache set, + * even dc->io_disable is true and no more I/O requests + * accepted, cache device internal I/O (writeback scan or + * garbage collection) may still prevent bcache device from + * being stopped. So here CACHE_SET_IO_DISABLE should be + * set to c->flags too, to make the internal I/O to cache + * device rejected and stopped immediately. + * If c is NULL, that means the bcache device is not attached + * to any cache set, then no CACHE_SET_IO_DISABLE bit to set. + */ + c = dc->disk.c; + if (c && test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags)) + pr_info("CACHE_SET_IO_DISABLE already set"); bcache_device_stop(&dc->disk); return true; @@ -1395,7 +1412,7 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...) return false; if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags)) - pr_warn("CACHE_SET_IO_DISABLE already set"); + pr_info("CACHE_SET_IO_DISABLE already set"); /* XXX: we can be called from atomic context acquire_console_sem(); @@ -1539,6 +1556,20 @@ static void conditional_stop_bcache_device(struct cache_set *c, */ pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.", d->disk->disk_name); + /* + * There might be a small time gap that cache set is + * released but bcache device is not. Inside this time + * gap, regular I/O requests will directly go into + * backing device as no cache set attached to. This + * behavior may also introduce potential inconsistence + * data in writeback mode while cache is dirty. + * Therefore before calling bcache_device_stop() due + * to a broken cache device, dc->io_disable should be + * explicitly set to true. 
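Both bch_cached_dev_error() and bch_cache_set_error() above rely on test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags), so the flag is raised exactly once and every later caller only logs that it was already set. The userspace sketch below reproduces that pattern with C11 atomics; the bit number, helper name and messages are illustrative, not the bcache definitions.

#include <stdatomic.h>
#include <stdio.h>

#define CACHE_SET_IO_DISABLE 0                 /* illustrative bit number */

static atomic_ulong flags;

/* userspace stand-in for the kernel's test_and_set_bit() */
static int test_and_set_bit_demo(int nr, atomic_ulong *addr)
{
        unsigned long old = atomic_fetch_or(addr, 1UL << nr);

        return (old >> nr) & 1;
}

static void disable_io(const char *who)
{
        if (test_and_set_bit_demo(CACHE_SET_IO_DISABLE, &flags))
                printf("%s: CACHE_SET_IO_DISABLE already set\n", who);
        else
                printf("%s: rejecting further I/O to the cache set\n", who);
}

int main(void)
{
        disable_io("backing device error");    /* sets the bit */
        disable_io("cache set error");         /* sees it set, only logs */
        return 0;
}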
+ */ + dc->io_disable = true; + /* make others know io_disable is true earlier */ + smp_mb(); bcache_device_stop(d); } else { /* @@ -2003,12 +2034,10 @@ static int cache_alloc(struct cache *ca) static int register_cache(struct cache_sb *sb, struct page *sb_page, struct block_device *bdev, struct cache *ca) { - char name[BDEVNAME_SIZE]; const char *err = NULL; /* must be set for any error case */ int ret = 0; - bdevname(bdev, name); - + bdevname(bdev, ca->cache_dev_name); memcpy(&ca->sb, sb, sizeof(struct cache_sb)); ca->bdev = bdev; ca->bdev->bd_holder = ca; @@ -2045,14 +2074,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, goto out; } - pr_info("registered cache device %s", name); + pr_info("registered cache device %s", ca->cache_dev_name); out: kobject_put(&ca->kobj); err: if (err) - pr_notice("error %s: %s", name, err); + pr_notice("error %s: %s", ca->cache_dev_name, err); return ret; } diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 4a9547cdcdc5..ad45ebe1a74b 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -244,8 +244,10 @@ static void dirty_endio(struct bio *bio) struct keybuf_key *w = bio->bi_private; struct dirty_io *io = w->private; - if (bio->bi_status) + if (bio->bi_status) { SET_KEY_DIRTY(&w->key, false); + bch_count_backing_io_errors(io->dc, bio); + } closure_put(&io->cl); } diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 12aa9ca21d8c..dc385b70e4c3 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -1681,8 +1681,9 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign if (block_size <= KMALLOC_MAX_SIZE && (block_size < PAGE_SIZE || !is_power_of_2(block_size))) { - snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", c->block_size); - c->slab_cache = kmem_cache_create(slab_name, c->block_size, ARCH_KMALLOC_MINALIGN, + unsigned align = min(1U << __ffs(block_size), (unsigned)PAGE_SIZE); + snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", block_size); + c->slab_cache = kmem_cache_create(slab_name, block_size, align, SLAB_RECLAIM_ACCOUNT, NULL); if (!c->slab_cache) { r = -ENOMEM; diff --git a/drivers/md/dm-cache-background-tracker.c b/drivers/md/dm-cache-background-tracker.c index 1d0af0a21fc7..84814e819e4c 100644 --- a/drivers/md/dm-cache-background-tracker.c +++ b/drivers/md/dm-cache-background-tracker.c @@ -166,7 +166,7 @@ static bool max_work_reached(struct background_tracker *b) atomic_read(&b->pending_demotes) >= b->max_work; } -struct bt_work *alloc_work(struct background_tracker *b) +static struct bt_work *alloc_work(struct background_tracker *b) { if (max_work_reached(b)) return NULL; diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 77d9fe58dae2..514fb4aec5d1 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -2440,7 +2440,7 @@ static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, str unsigned i; for (i = 0; i < ic->journal_sections; i++) kvfree(sl[i]); - kfree(sl); + kvfree(sl); } static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, struct page_list *pl) diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 580c49cc8079..5903e492bb34 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -23,6 +23,8 @@ #define MAX_RECOVERY 1 /* Maximum number of regions recovered in parallel. 
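For the dm-bufio hunk above, the new slab alignment is min(1 << __ffs(block_size), PAGE_SIZE): the largest power of two that divides the block size, capped at one page. A quick userspace check of that arithmetic, assuming 4 KiB pages and using __builtin_ctz() as a stand-in for the kernel's __ffs():

#include <stdio.h>

#define DEMO_PAGE_SIZE 4096u                   /* assumes 4 KiB pages */

int main(void)
{
        unsigned int sizes[] = { 512, 1536, 3072, 8192 };
        unsigned int i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                unsigned int block_size = sizes[i];
                /* 1 << __builtin_ctz(x) is the largest power of two dividing x */
                unsigned int align = 1u << __builtin_ctz(block_size);

                if (align > DEMO_PAGE_SIZE)
                        align = DEMO_PAGE_SIZE;
                printf("block_size=%u -> slab align=%u\n", block_size, align);
        }
        return 0;
}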
*/ +#define MAX_NR_MIRRORS (DM_KCOPYD_MAX_REGIONS + 1) + #define DM_RAID1_HANDLE_ERRORS 0x01 #define DM_RAID1_KEEP_LOG 0x02 #define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS) @@ -255,7 +257,7 @@ static int mirror_flush(struct dm_target *ti) unsigned long error_bits; unsigned int i; - struct dm_io_region io[ms->nr_mirrors]; + struct dm_io_region io[MAX_NR_MIRRORS]; struct mirror *m; struct dm_io_request io_req = { .bi_op = REQ_OP_WRITE, @@ -651,7 +653,7 @@ static void write_callback(unsigned long error, void *context) static void do_write(struct mirror_set *ms, struct bio *bio) { unsigned int i; - struct dm_io_region io[ms->nr_mirrors], *dest = io; + struct dm_io_region io[MAX_NR_MIRRORS], *dest = io; struct mirror *m; struct dm_io_request io_req = { .bi_op = REQ_OP_WRITE, @@ -1083,7 +1085,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv) argc -= args_used; if (!argc || sscanf(argv[0], "%u%c", &nr_mirrors, &dummy) != 1 || - nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) { + nr_mirrors < 2 || nr_mirrors > MAX_NR_MIRRORS) { ti->error = "Invalid number of mirrors"; dm_dirty_log_destroy(dl); return -EINVAL; @@ -1404,7 +1406,7 @@ static void mirror_status(struct dm_target *ti, status_type_t type, int num_feature_args = 0; struct mirror_set *ms = (struct mirror_set *) ti->private; struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); - char buffer[ms->nr_mirrors + 1]; + char buffer[MAX_NR_MIRRORS + 1]; switch (type) { case STATUSTYPE_INFO: diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 4ea404dbcf0b..0a7b0107ca78 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1020,7 +1020,8 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); static struct dm_target *dm_dax_get_live_target(struct mapped_device *md, - sector_t sector, int *srcu_idx) + sector_t sector, int *srcu_idx) + __acquires(md->io_barrier) { struct dm_table *map; struct dm_target *ti; @@ -1037,7 +1038,7 @@ static struct dm_target *dm_dax_get_live_target(struct mapped_device *md, } static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, - long nr_pages, void **kaddr, pfn_t *pfn) + long nr_pages, void **kaddr, pfn_t *pfn) { struct mapped_device *md = dax_get_private(dax_dev); sector_t sector = pgoff * PAGE_SECTORS; @@ -1065,7 +1066,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, } static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, - void *addr, size_t bytes, struct iov_iter *i) + void *addr, size_t bytes, struct iov_iter *i) { struct mapped_device *md = dax_get_private(dax_dev); sector_t sector = pgoff * PAGE_SECTORS; diff --git a/drivers/md/md.c b/drivers/md/md.c index 3bea45e8ccff..c208c01f63a5 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -9256,8 +9256,10 @@ void md_reload_sb(struct mddev *mddev, int nr) check_sb_changes(mddev, rdev); /* Read all rdev's to update recovery_offset */ - rdev_for_each_rcu(rdev, mddev) - read_rdev(mddev, rdev); + rdev_for_each_rcu(rdev, mddev) { + if (!test_bit(Faulty, &rdev->flags)) + read_rdev(mddev, rdev); + } } EXPORT_SYMBOL(md_reload_sb); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index e2943fb74056..e9e3308cb0a7 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -854,7 +854,7 @@ static void flush_pending_writes(struct r1conf *conf) * there is no normal IO happeing. It must arrange to call * lower_barrier when the particular background IO completes. 
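The dm-raid1 hunks above drop variable-length arrays sized by ms->nr_mirrors in favour of fixed arrays of MAX_NR_MIRRORS entries, an upper bound the constructor already enforces. A minimal sketch of that pattern follows; the region struct is simplified and DM_KCOPYD_MAX_REGIONS is hard-coded to 8 here (its usual value, but treat that as an assumption).

#include <stdio.h>

#define DM_KCOPYD_MAX_REGIONS 8                /* assumed value for the demo */
#define MAX_NR_MIRRORS (DM_KCOPYD_MAX_REGIONS + 1)

struct demo_io_region {
        unsigned long sector;
        unsigned long count;
};

static void do_write_demo(unsigned int nr_mirrors)
{
        /* fixed-size array instead of the old io[nr_mirrors] VLA */
        struct demo_io_region io[MAX_NR_MIRRORS];
        unsigned int i;

        if (nr_mirrors < 2 || nr_mirrors > MAX_NR_MIRRORS) {
                fprintf(stderr, "invalid number of mirrors\n");
                return;
        }
        for (i = 0; i < nr_mirrors; i++) {
                io[i].sector = i * 8;
                io[i].count = 8;
        }
        printf("dispatched to %u mirrors, stack usage fixed at %u slots\n",
               nr_mirrors, (unsigned int)MAX_NR_MIRRORS);
}

int main(void)
{
        do_write_demo(3);
        return 0;
}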
*/ -static void raise_barrier(struct r1conf *conf, sector_t sector_nr) +static sector_t raise_barrier(struct r1conf *conf, sector_t sector_nr) { int idx = sector_to_idx(sector_nr); @@ -885,13 +885,23 @@ static void raise_barrier(struct r1conf *conf, sector_t sector_nr) * max resync count which allowed on current I/O barrier bucket. */ wait_event_lock_irq(conf->wait_barrier, - !conf->array_frozen && + (!conf->array_frozen && !atomic_read(&conf->nr_pending[idx]) && - atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH, + atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) || + test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery), conf->resync_lock); + if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { + atomic_dec(&conf->barrier[idx]); + spin_unlock_irq(&conf->resync_lock); + wake_up(&conf->wait_barrier); + return -EINTR; + } + atomic_inc(&conf->nr_sync_pending); spin_unlock_irq(&conf->resync_lock); + + return 0; } static void lower_barrier(struct r1conf *conf, sector_t sector_nr) @@ -1092,6 +1102,8 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio, goto skip_copy; } + behind_bio->bi_write_hint = bio->bi_write_hint; + while (i < vcnt && size) { struct page *page; int len = min_t(int, PAGE_SIZE, size); @@ -2662,9 +2674,12 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, bitmap_cond_end_sync(mddev->bitmap, sector_nr, mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); - r1_bio = raid1_alloc_init_r1buf(conf); - raise_barrier(conf, sector_nr); + + if (raise_barrier(conf, sector_nr)) + return 0; + + r1_bio = raid1_alloc_init_r1buf(conf); rcu_read_lock(); /* diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c index e216cd768409..b07114b5efb2 100644 --- a/drivers/media/i2c/saa7115.c +++ b/drivers/media/i2c/saa7115.c @@ -20,7 +20,7 @@ // // VBI support (2004) and cleanups (2005) by Hans Verkuil <hverkuil@xs4all.nl> // -// Copyright (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org> +// Copyright (c) 2005-2006 Mauro Carvalho Chehab <mchehab@kernel.org> // SAA7111, SAA7113 and SAA7118 support #include "saa711x_regs.h" diff --git a/drivers/media/i2c/saa711x_regs.h b/drivers/media/i2c/saa711x_regs.h index a50d480e101a..44fabe08234d 100644 --- a/drivers/media/i2c/saa711x_regs.h +++ b/drivers/media/i2c/saa711x_regs.h @@ -2,7 +2,7 @@ * SPDX-License-Identifier: GPL-2.0+ * saa711x - Philips SAA711x video decoder register specifications * - * Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@infradead.org> + * Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@kernel.org> */ #define R_00_CHIP_VERSION 0x00 diff --git a/drivers/media/i2c/tda7432.c b/drivers/media/i2c/tda7432.c index 1c5c61d829d6..9b4f21237810 100644 --- a/drivers/media/i2c/tda7432.c +++ b/drivers/media/i2c/tda7432.c @@ -8,7 +8,7 @@ * Muting and tone control by Jonathan Isom <jisom@ematic.com> * * Copyright (c) 2000 Eric Sandeen <eric_sandeen@bigfoot.com> - * Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@infradead.org> + * Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@kernel.org> * This code is placed under the terms of the GNU General Public License * Based on tda9855.c by Steve VanDeBogart (vandebo@uclink.berkeley.edu) * Which was based on tda8425.c by Greg Alexander (c) 1998 diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c index 2476d812f669..1734ed4ede33 100644 --- a/drivers/media/i2c/tvp5150.c +++ b/drivers/media/i2c/tvp5150.c @@ -2,7 +2,7 @@ // // tvp5150 - Texas Instruments TVP5150A/AM1 and TVP5151 video 
decoder driver // -// Copyright (c) 2005,2006 Mauro Carvalho Chehab <mchehab@infradead.org> +// Copyright (c) 2005,2006 Mauro Carvalho Chehab <mchehab@kernel.org> #include <dt-bindings/media/tvp5150.h> #include <linux/i2c.h> diff --git a/drivers/media/i2c/tvp5150_reg.h b/drivers/media/i2c/tvp5150_reg.h index c43b7b844021..d3a764cae1a0 100644 --- a/drivers/media/i2c/tvp5150_reg.h +++ b/drivers/media/i2c/tvp5150_reg.h @@ -3,7 +3,7 @@ * * tvp5150 - Texas Instruments TVP5150A/AM1 video decoder registers * - * Copyright (c) 2005,2006 Mauro Carvalho Chehab <mchehab@infradead.org> + * Copyright (c) 2005,2006 Mauro Carvalho Chehab <mchehab@kernel.org> */ #define TVP5150_VD_IN_SRC_SEL_1 0x00 /* Video input source selection #1 */ diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c index a26c1a3f7183..4599b7e28a8d 100644 --- a/drivers/media/i2c/tvp7002.c +++ b/drivers/media/i2c/tvp7002.c @@ -5,7 +5,7 @@ * Author: Santiago Nunez-Corrales <santiago.nunez@ridgerun.com> * * This code is partially based upon the TVP5150 driver - * written by Mauro Carvalho Chehab (mchehab@infradead.org), + * written by Mauro Carvalho Chehab <mchehab@kernel.org>, * the TVP514x driver written by Vaibhav Hiremath <hvaibhav@ti.com> * and the TVP7002 driver in the TI LSP 2.10.00.14. Revisions by * Muralidharan Karicheri and Snehaprabha Narnakaje (TI). diff --git a/drivers/media/i2c/tvp7002_reg.h b/drivers/media/i2c/tvp7002_reg.h index 3c8c8b0a6a4c..7f56ba689dfe 100644 --- a/drivers/media/i2c/tvp7002_reg.h +++ b/drivers/media/i2c/tvp7002_reg.h @@ -5,7 +5,7 @@ * Author: Santiago Nunez-Corrales <santiago.nunez@ridgerun.com> * * This code is partially based upon the TVP5150 driver - * written by Mauro Carvalho Chehab (mchehab@infradead.org), + * written by Mauro Carvalho Chehab <mchehab@kernel.org>, * the TVP514x driver written by Vaibhav Hiremath <hvaibhav@ti.com> * and the TVP7002 driver in the TI LSP 2.10.00.14 * diff --git a/drivers/media/media-devnode.c b/drivers/media/media-devnode.c index 67ac51eff15c..6b87a721dc49 100644 --- a/drivers/media/media-devnode.c +++ b/drivers/media/media-devnode.c @@ -4,7 +4,7 @@ * Copyright (C) 2010 Nokia Corporation * * Based on drivers/media/video/v4l2_dev.c code authored by - * Mauro Carvalho Chehab <mchehab@infradead.org> (version 2) + * Mauro Carvalho Chehab <mchehab@kernel.org> (version 2) * Alan Cox, <alan@lxorguk.ukuu.org.uk> (version 1) * * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com> diff --git a/drivers/media/pci/bt8xx/bttv-audio-hook.c b/drivers/media/pci/bt8xx/bttv-audio-hook.c index 9f1f9169fb5b..346fc7f58839 100644 --- a/drivers/media/pci/bt8xx/bttv-audio-hook.c +++ b/drivers/media/pci/bt8xx/bttv-audio-hook.c @@ -1,7 +1,7 @@ /* * Handlers for board audio hooks, splitted from bttv-cards * - * Copyright (c) 2006 Mauro Carvalho Chehab (mchehab@infradead.org) + * Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@kernel.org> * This code is placed under the terms of the GNU General Public License */ diff --git a/drivers/media/pci/bt8xx/bttv-audio-hook.h b/drivers/media/pci/bt8xx/bttv-audio-hook.h index 159d07adeff8..be16a537a03a 100644 --- a/drivers/media/pci/bt8xx/bttv-audio-hook.h +++ b/drivers/media/pci/bt8xx/bttv-audio-hook.h @@ -1,7 +1,7 @@ /* * Handlers for board audio hooks, splitted from bttv-cards * - * Copyright (c) 2006 Mauro Carvalho Chehab (mchehab@infradead.org) + * Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@kernel.org> * This code is placed under the terms of the GNU General Public License */ diff --git 
a/drivers/media/pci/bt8xx/bttv-cards.c b/drivers/media/pci/bt8xx/bttv-cards.c index 1902732f90e1..2616243b2c49 100644 --- a/drivers/media/pci/bt8xx/bttv-cards.c +++ b/drivers/media/pci/bt8xx/bttv-cards.c @@ -2447,7 +2447,7 @@ struct tvcard bttv_tvcards[] = { }, /* ---- card 0x88---------------------------------- */ [BTTV_BOARD_ACORP_Y878F] = { - /* Mauro Carvalho Chehab <mchehab@infradead.org> */ + /* Mauro Carvalho Chehab <mchehab@kernel.org> */ .name = "Acorp Y878F", .video_inputs = 3, /* .audio_inputs= 1, */ @@ -2688,7 +2688,7 @@ struct tvcard bttv_tvcards[] = { }, [BTTV_BOARD_ENLTV_FM_2] = { /* Encore TV Tuner Pro ENL TV-FM-2 - Mauro Carvalho Chehab <mchehab@infradead.org */ + Mauro Carvalho Chehab <mchehab@kernel.org> */ .name = "Encore ENL TV-FM-2", .video_inputs = 3, /* .audio_inputs= 1, */ diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c index 707f57a9f940..de3f44b8dec6 100644 --- a/drivers/media/pci/bt8xx/bttv-driver.c +++ b/drivers/media/pci/bt8xx/bttv-driver.c @@ -13,7 +13,7 @@ (c) 2005-2006 Nickolay V. Shmyrev <nshmyrev@yandex.ru> Fixes to be fully V4L2 compliant by - (c) 2006 Mauro Carvalho Chehab <mchehab@infradead.org> + (c) 2006 Mauro Carvalho Chehab <mchehab@kernel.org> Cropping and overscan support Copyright (C) 2005, 2006 Michael H. Schimek <mschimek@gmx.at> diff --git a/drivers/media/pci/bt8xx/bttv-i2c.c b/drivers/media/pci/bt8xx/bttv-i2c.c index eccd1e3d717a..c76823eb399d 100644 --- a/drivers/media/pci/bt8xx/bttv-i2c.c +++ b/drivers/media/pci/bt8xx/bttv-i2c.c @@ -8,7 +8,7 @@ & Marcus Metzler (mocm@thp.uni-koeln.de) (c) 1999-2003 Gerd Knorr <kraxel@bytesex.org> - (c) 2005 Mauro Carvalho Chehab <mchehab@infradead.org> + (c) 2005 Mauro Carvalho Chehab <mchehab@kernel.org> - Multituner support and i2c address binding This program is free software; you can redistribute it and/or modify diff --git a/drivers/media/pci/cx23885/cx23885-input.c b/drivers/media/pci/cx23885/cx23885-input.c index be49589a61d2..395ff9bba759 100644 --- a/drivers/media/pci/cx23885/cx23885-input.c +++ b/drivers/media/pci/cx23885/cx23885-input.c @@ -13,7 +13,7 @@ * Copyright (C) 2008 <srinivasa.deevi at conexant dot com> * Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it> * Markus Rechberger <mrechberger@gmail.com> - * Mauro Carvalho Chehab <mchehab@infradead.org> + * Mauro Carvalho Chehab <mchehab@kernel.org> * Sascha Sommer <saschasommer@freenet.de> * Copyright (C) 2004, 2005 Chris Pascoe * Copyright (C) 2003, 2004 Gerd Knorr diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c index ab09bb55cf45..8a28fda703a2 100644 --- a/drivers/media/pci/cx88/cx88-alsa.c +++ b/drivers/media/pci/cx88/cx88-alsa.c @@ -4,7 +4,7 @@ * * (c) 2007 Trent Piepho <xyzzy@speakeasy.org> * (c) 2005,2006 Ricardo Cerqueira <v4l@cerqueira.org> - * (c) 2005 Mauro Carvalho Chehab <mchehab@infradead.org> + * (c) 2005 Mauro Carvalho Chehab <mchehab@kernel.org> * Based on a dummy cx88 module by Gerd Knorr <kraxel@bytesex.org> * Based on dummy.c by Jaroslav Kysela <perex@perex.cz> * @@ -103,7 +103,7 @@ MODULE_PARM_DESC(index, "Index value for cx88x capture interface(s)."); MODULE_DESCRIPTION("ALSA driver module for cx2388x based TV cards"); MODULE_AUTHOR("Ricardo Cerqueira"); -MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>"); +MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>"); MODULE_LICENSE("GPL"); MODULE_VERSION(CX88_VERSION); diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c index 
0e0952e60795..7a4876cf9f08 100644 --- a/drivers/media/pci/cx88/cx88-blackbird.c +++ b/drivers/media/pci/cx88/cx88-blackbird.c @@ -5,7 +5,7 @@ * (c) 2004 Jelle Foks <jelle@foks.us> * (c) 2004 Gerd Knorr <kraxel@bytesex.org> * - * (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org> + * (c) 2005-2006 Mauro Carvalho Chehab <mchehab@kernel.org> * - video_ioctl2 conversion * * Includes parts from the ivtv driver <http://sourceforge.net/projects/ivtv/> diff --git a/drivers/media/pci/cx88/cx88-core.c b/drivers/media/pci/cx88/cx88-core.c index 8bfa5b7ed91b..60988e95b637 100644 --- a/drivers/media/pci/cx88/cx88-core.c +++ b/drivers/media/pci/cx88/cx88-core.c @@ -4,7 +4,7 @@ * * (c) 2003 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs] * - * (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org> + * (c) 2005-2006 Mauro Carvalho Chehab <mchehab@kernel.org> * - Multituner support * - video_ioctl2 conversion * - PAL/M fixes diff --git a/drivers/media/pci/cx88/cx88-i2c.c b/drivers/media/pci/cx88/cx88-i2c.c index f7692775fb5a..99f88a05a7c9 100644 --- a/drivers/media/pci/cx88/cx88-i2c.c +++ b/drivers/media/pci/cx88/cx88-i2c.c @@ -8,7 +8,7 @@ * (c) 2002 Yurij Sysoev <yurij@naturesoft.net> * (c) 1999-2003 Gerd Knorr <kraxel@bytesex.org> * - * (c) 2005 Mauro Carvalho Chehab <mchehab@infradead.org> + * (c) 2005 Mauro Carvalho Chehab <mchehab@kernel.org> * - Multituner support and i2c address binding * * This program is free software; you can redistribute it and/or modify diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c index 9be682cdb644..7b113bad70d2 100644 --- a/drivers/media/pci/cx88/cx88-video.c +++ b/drivers/media/pci/cx88/cx88-video.c @@ -5,7 +5,7 @@ * * (c) 2003-04 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs] * - * (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org> + * (c) 2005-2006 Mauro Carvalho Chehab <mchehab@kernel.org> * - Multituner support * - video_ioctl2 conversion * - PAL/M fixes diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c index 5ef635e72e10..4c52ac6d8bc5 100644 --- a/drivers/media/radio/radio-aimslab.c +++ b/drivers/media/radio/radio-aimslab.c @@ -4,7 +4,7 @@ * Copyright 1997 M. Kirkwood * * Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@cisco.com> - * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org> + * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org> * Converted to new API by Alan Cox <alan@lxorguk.ukuu.org.uk> * Various bugfixes and enhancements by Russell Kroll <rkroll@exploits.org> * diff --git a/drivers/media/radio/radio-aztech.c b/drivers/media/radio/radio-aztech.c index 9e12c6027359..840b7d60462b 100644 --- a/drivers/media/radio/radio-aztech.c +++ b/drivers/media/radio/radio-aztech.c @@ -2,7 +2,7 @@ * radio-aztech.c - Aztech radio card driver * * Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@xs4all.nl> - * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org> + * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org> * Adapted to support the Video for Linux API by * Russell Kroll <rkroll@exploits.org>. 
Based on original tuner code by: * diff --git a/drivers/media/radio/radio-gemtek.c b/drivers/media/radio/radio-gemtek.c index 3ff4c4e1435f..f051f8694ab9 100644 --- a/drivers/media/radio/radio-gemtek.c +++ b/drivers/media/radio/radio-gemtek.c @@ -15,7 +15,7 @@ * Various bugfixes and enhancements by Russell Kroll <rkroll@exploits.org> * * Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@cisco.com> - * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org> + * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org> * * Note: this card seems to swap the left and right audio channels! * diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c index 95f06f3b35dc..e4e758739246 100644 --- a/drivers/media/radio/radio-maxiradio.c +++ b/drivers/media/radio/radio-maxiradio.c @@ -27,7 +27,7 @@ * BUGS: * - card unmutes if you change frequency * - * (c) 2006, 2007 by Mauro Carvalho Chehab <mchehab@infradead.org>: + * (c) 2006, 2007 by Mauro Carvalho Chehab <mchehab@kernel.org>: * - Conversion to V4L2 API * - Uses video_ioctl2 for parsing and to add debug support */ diff --git a/drivers/media/radio/radio-rtrack2.c b/drivers/media/radio/radio-rtrack2.c index abeaedd8d437..5a1470eb753e 100644 --- a/drivers/media/radio/radio-rtrack2.c +++ b/drivers/media/radio/radio-rtrack2.c @@ -7,7 +7,7 @@ * Various bugfixes and enhancements by Russell Kroll <rkroll@exploits.org> * * Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@cisco.com> - * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org> + * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org> * * Fully tested with actual hardware and the v4l2-compliance tool. */ diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c index fc4e63d36e4c..4f9b97edd9eb 100644 --- a/drivers/media/radio/radio-sf16fmi.c +++ b/drivers/media/radio/radio-sf16fmi.c @@ -13,7 +13,7 @@ * No volume control - only mute/unmute - you have to use line volume * control on SB-part of SF16-FMI/SF16-FMP/SF16-FMD * - * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org> + * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org> */ #include <linux/kernel.h> /* __setup */ diff --git a/drivers/media/radio/radio-terratec.c b/drivers/media/radio/radio-terratec.c index 4f116ea294fb..1af8f29cc7d1 100644 --- a/drivers/media/radio/radio-terratec.c +++ b/drivers/media/radio/radio-terratec.c @@ -17,7 +17,7 @@ * Volume Control is done digitally * * Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@cisco.com> - * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org> + * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org> */ #include <linux/module.h> /* Modules */ diff --git a/drivers/media/radio/radio-trust.c b/drivers/media/radio/radio-trust.c index 26a8c6002121..a4bad322ffff 100644 --- a/drivers/media/radio/radio-trust.c +++ b/drivers/media/radio/radio-trust.c @@ -12,7 +12,7 @@ * Scott McGrath (smcgrath@twilight.vtc.vsc.edu) * William McGrath (wmcgrath@twilight.vtc.vsc.edu) * - * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org> + * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org> */ #include <stdarg.h> diff --git a/drivers/media/radio/radio-typhoon.c b/drivers/media/radio/radio-typhoon.c index eb72a4d13758..d0d67ad85b8f 100644 --- a/drivers/media/radio/radio-typhoon.c +++ b/drivers/media/radio/radio-typhoon.c @@ -25,7 +25,7 @@ * 
The frequency change is necessary since the card never seems to be * completely silent. * - * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org> + * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org> */ #include <linux/module.h> /* Modules */ diff --git a/drivers/media/radio/radio-zoltrix.c b/drivers/media/radio/radio-zoltrix.c index 026e88eef29c..6007cd09b328 100644 --- a/drivers/media/radio/radio-zoltrix.c +++ b/drivers/media/radio/radio-zoltrix.c @@ -27,7 +27,7 @@ * 2002-07-15 - Fix Stereo typo * * 2006-07-24 - Converted to V4L2 API - * by Mauro Carvalho Chehab <mchehab@infradead.org> + * by Mauro Carvalho Chehab <mchehab@kernel.org> * * Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@cisco.com> * diff --git a/drivers/media/rc/keymaps/rc-avermedia-m135a.c b/drivers/media/rc/keymaps/rc-avermedia-m135a.c index f6977df1a75b..d275d98d066a 100644 --- a/drivers/media/rc/keymaps/rc-avermedia-m135a.c +++ b/drivers/media/rc/keymaps/rc-avermedia-m135a.c @@ -12,7 +12,7 @@ * * On Avermedia M135A with IR model RM-JX, the same codes exist on both * Positivo (BR) and original IR, initial version and remote control codes - * added by Mauro Carvalho Chehab <mchehab@infradead.org> + * added by Mauro Carvalho Chehab <mchehab@kernel.org> * * Positivo also ships Avermedia M135A with model RM-K6, extra control * codes added by Herton Ronaldo Krzesinski <herton@mandriva.com.br> diff --git a/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c b/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c index e4e78c1f4123..057c13b765ef 100644 --- a/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c +++ b/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c @@ -9,7 +9,7 @@ #include <linux/module.h> /* Encore ENLTV-FM v5.3 - Mauro Carvalho Chehab <mchehab@infradead.org> + Mauro Carvalho Chehab <mchehab@kernel.org> */ static struct rc_map_table encore_enltv_fm53[] = { diff --git a/drivers/media/rc/keymaps/rc-encore-enltv2.c b/drivers/media/rc/keymaps/rc-encore-enltv2.c index c3d4437a6fda..cd0555924456 100644 --- a/drivers/media/rc/keymaps/rc-encore-enltv2.c +++ b/drivers/media/rc/keymaps/rc-encore-enltv2.c @@ -9,7 +9,7 @@ #include <linux/module.h> /* Encore ENLTV2-FM - silver plastic - "Wand Media" written at the botton - Mauro Carvalho Chehab <mchehab@infradead.org> */ + Mauro Carvalho Chehab <mchehab@kernel.org> */ static struct rc_map_table encore_enltv2[] = { { 0x4c, KEY_POWER2 }, diff --git a/drivers/media/rc/keymaps/rc-kaiomy.c b/drivers/media/rc/keymaps/rc-kaiomy.c index f0f88df18606..a00051339842 100644 --- a/drivers/media/rc/keymaps/rc-kaiomy.c +++ b/drivers/media/rc/keymaps/rc-kaiomy.c @@ -9,7 +9,7 @@ #include <linux/module.h> /* Kaiomy TVnPC U2 - Mauro Carvalho Chehab <mchehab@infradead.org> + Mauro Carvalho Chehab <mchehab@kernel.org> */ static struct rc_map_table kaiomy[] = { diff --git a/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c b/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c index 453e04377de7..db5edde3eeb1 100644 --- a/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c +++ b/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c @@ -9,7 +9,7 @@ #include <linux/module.h> /* Kworld Plus TV Analog Lite PCI IR - Mauro Carvalho Chehab <mchehab@infradead.org> + Mauro Carvalho Chehab <mchehab@kernel.org> */ static struct rc_map_table kworld_plus_tv_analog[] = { diff --git a/drivers/media/rc/keymaps/rc-pixelview-new.c b/drivers/media/rc/keymaps/rc-pixelview-new.c index 791130f108ff..e4e34f2ccf74 100644 --- a/drivers/media/rc/keymaps/rc-pixelview-new.c +++ 
b/drivers/media/rc/keymaps/rc-pixelview-new.c @@ -9,7 +9,7 @@ #include <linux/module.h> /* - Mauro Carvalho Chehab <mchehab@infradead.org> + Mauro Carvalho Chehab <mchehab@kernel.org> present on PV MPEG 8000GT */ diff --git a/drivers/media/tuners/tea5761.c b/drivers/media/tuners/tea5761.c index 88b3e80c38ad..d78a2bdb3e36 100644 --- a/drivers/media/tuners/tea5761.c +++ b/drivers/media/tuners/tea5761.c @@ -2,7 +2,7 @@ // For Philips TEA5761 FM Chip // I2C address is always 0x20 (0x10 at 7-bit mode). // -// Copyright (c) 2005-2007 Mauro Carvalho Chehab (mchehab@infradead.org) +// Copyright (c) 2005-2007 Mauro Carvalho Chehab <mchehab@kernel.org> #include <linux/i2c.h> #include <linux/slab.h> @@ -337,5 +337,5 @@ EXPORT_SYMBOL_GPL(tea5761_attach); EXPORT_SYMBOL_GPL(tea5761_autodetection); MODULE_DESCRIPTION("Philips TEA5761 FM tuner driver"); -MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>"); +MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/tuners/tea5767.c b/drivers/media/tuners/tea5767.c index 2b2c064d7dc3..016d0d5ec50b 100644 --- a/drivers/media/tuners/tea5767.c +++ b/drivers/media/tuners/tea5767.c @@ -2,7 +2,7 @@ // For Philips TEA5767 FM Chip used on some TV Cards like Prolink Pixelview // I2C address is always 0xC0. // -// Copyright (c) 2005 Mauro Carvalho Chehab (mchehab@infradead.org) +// Copyright (c) 2005 Mauro Carvalho Chehab <mchehab@kernel.org> // // tea5767 autodetection thanks to Torsten Seeboth and Atsushi Nakagawa // from their contributions on DScaler. @@ -469,5 +469,5 @@ EXPORT_SYMBOL_GPL(tea5767_attach); EXPORT_SYMBOL_GPL(tea5767_autodetection); MODULE_DESCRIPTION("Philips TEA5767 FM tuner driver"); -MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>"); +MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/tuners/tuner-xc2028-types.h b/drivers/media/tuners/tuner-xc2028-types.h index bb0437c36c03..50d017a4822a 100644 --- a/drivers/media/tuners/tuner-xc2028-types.h +++ b/drivers/media/tuners/tuner-xc2028-types.h @@ -5,7 +5,7 @@ * This file includes internal tipes to be used inside tuner-xc2028. 
* Shouldn't be included outside tuner-xc2028 * - * Copyright (c) 2007-2008 Mauro Carvalho Chehab (mchehab@infradead.org) + * Copyright (c) 2007-2008 Mauro Carvalho Chehab <mchehab@kernel.org> */ /* xc3028 firmware types */ diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c index fca85e08ebd7..84744e138982 100644 --- a/drivers/media/tuners/tuner-xc2028.c +++ b/drivers/media/tuners/tuner-xc2028.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // tuner-xc2028 // -// Copyright (c) 2007-2008 Mauro Carvalho Chehab (mchehab@infradead.org) +// Copyright (c) 2007-2008 Mauro Carvalho Chehab <mchehab@kernel.org> // // Copyright (c) 2007 Michel Ludwig (michel.ludwig@gmail.com) // - frontend interface @@ -1518,7 +1518,7 @@ EXPORT_SYMBOL(xc2028_attach); MODULE_DESCRIPTION("Xceive xc2028/xc3028 tuner driver"); MODULE_AUTHOR("Michel Ludwig <michel.ludwig@gmail.com>"); -MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>"); +MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>"); MODULE_LICENSE("GPL v2"); MODULE_FIRMWARE(XC2028_DEFAULT_FIRMWARE); MODULE_FIRMWARE(XC3028L_DEFAULT_FIRMWARE); diff --git a/drivers/media/tuners/tuner-xc2028.h b/drivers/media/tuners/tuner-xc2028.h index 03fd6d4233a4..7b58bc06e35c 100644 --- a/drivers/media/tuners/tuner-xc2028.h +++ b/drivers/media/tuners/tuner-xc2028.h @@ -2,7 +2,7 @@ * SPDX-License-Identifier: GPL-2.0 * tuner-xc2028 * - * Copyright (c) 2007-2008 Mauro Carvalho Chehab (mchehab@infradead.org) + * Copyright (c) 2007-2008 Mauro Carvalho Chehab <mchehab@kernel.org> */ #ifndef __TUNER_XC2028_H__ diff --git a/drivers/media/usb/em28xx/em28xx-camera.c b/drivers/media/usb/em28xx/em28xx-camera.c index 3c2694a16ed1..d1e66b503f4d 100644 --- a/drivers/media/usb/em28xx/em28xx-camera.c +++ b/drivers/media/usb/em28xx/em28xx-camera.c @@ -2,7 +2,7 @@ // // em28xx-camera.c - driver for Empia EM25xx/27xx/28xx USB video capture devices // -// Copyright (C) 2009 Mauro Carvalho Chehab <mchehab@infradead.org> +// Copyright (C) 2009 Mauro Carvalho Chehab <mchehab@kernel.org> // Copyright (C) 2013 Frank Schäfer <fschaefer.oss@googlemail.com> // // This program is free software; you can redistribute it and/or modify diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c index 6e0e67d23876..7c3203d7044b 100644 --- a/drivers/media/usb/em28xx/em28xx-cards.c +++ b/drivers/media/usb/em28xx/em28xx-cards.c @@ -5,7 +5,7 @@ // // Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it> // Markus Rechberger <mrechberger@gmail.com> -// Mauro Carvalho Chehab <mchehab@infradead.org> +// Mauro Carvalho Chehab <mchehab@kernel.org> // Sascha Sommer <saschasommer@freenet.de> // Copyright (C) 2012 Frank Schäfer <fschaefer.oss@googlemail.com> // diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c index 36d341fb65dd..f28995383090 100644 --- a/drivers/media/usb/em28xx/em28xx-core.c +++ b/drivers/media/usb/em28xx/em28xx-core.c @@ -4,7 +4,7 @@ // // Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it> // Markus Rechberger <mrechberger@gmail.com> -// Mauro Carvalho Chehab <mchehab@infradead.org> +// Mauro Carvalho Chehab <mchehab@kernel.org> // Sascha Sommer <saschasommer@freenet.de> // Copyright (C) 2012 Frank Schäfer <fschaefer.oss@googlemail.com> // @@ -32,7 +32,7 @@ #define DRIVER_AUTHOR "Ludovico Cavedon <cavedon@sssup.it>, " \ "Markus Rechberger <mrechberger@gmail.com>, " \ - "Mauro Carvalho Chehab <mchehab@infradead.org>, " \ + "Mauro Carvalho Chehab <mchehab@kernel.org>, 
" \ "Sascha Sommer <saschasommer@freenet.de>" MODULE_AUTHOR(DRIVER_AUTHOR); diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c index a54cb8dc52c9..3f493e0b0716 100644 --- a/drivers/media/usb/em28xx/em28xx-dvb.c +++ b/drivers/media/usb/em28xx/em28xx-dvb.c @@ -2,7 +2,7 @@ // // DVB device driver for em28xx // -// (c) 2008-2011 Mauro Carvalho Chehab <mchehab@infradead.org> +// (c) 2008-2011 Mauro Carvalho Chehab <mchehab@kernel.org> // // (c) 2008 Devin Heitmueller <devin.heitmueller@gmail.com> // - Fixes for the driver to properly work with HVR-950 @@ -63,7 +63,7 @@ #include "tc90522.h" #include "qm1d1c0042.h" -MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>"); +MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION(DRIVER_DESC " - digital TV interface"); MODULE_VERSION(EM28XX_VERSION); diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c index 9151bccd859a..6458682bc6e2 100644 --- a/drivers/media/usb/em28xx/em28xx-i2c.c +++ b/drivers/media/usb/em28xx/em28xx-i2c.c @@ -4,7 +4,7 @@ // // Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it> // Markus Rechberger <mrechberger@gmail.com> -// Mauro Carvalho Chehab <mchehab@infradead.org> +// Mauro Carvalho Chehab <mchehab@kernel.org> // Sascha Sommer <saschasommer@freenet.de> // Copyright (C) 2013 Frank Schäfer <fschaefer.oss@googlemail.com> // diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c index 2dc1be00b8b8..f84a1208d5d3 100644 --- a/drivers/media/usb/em28xx/em28xx-input.c +++ b/drivers/media/usb/em28xx/em28xx-input.c @@ -4,7 +4,7 @@ // // Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it> // Markus Rechberger <mrechberger@gmail.com> -// Mauro Carvalho Chehab <mchehab@infradead.org> +// Mauro Carvalho Chehab <mchehab@kernel.org> // Sascha Sommer <saschasommer@freenet.de> // // This program is free software; you can redistribute it and/or modify diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c index d70ee13cc52e..68571bf36d28 100644 --- a/drivers/media/usb/em28xx/em28xx-video.c +++ b/drivers/media/usb/em28xx/em28xx-video.c @@ -5,7 +5,7 @@ // // Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it> // Markus Rechberger <mrechberger@gmail.com> -// Mauro Carvalho Chehab <mchehab@infradead.org> +// Mauro Carvalho Chehab <mchehab@kernel.org> // Sascha Sommer <saschasommer@freenet.de> // Copyright (C) 2012 Frank Schäfer <fschaefer.oss@googlemail.com> // @@ -44,7 +44,7 @@ #define DRIVER_AUTHOR "Ludovico Cavedon <cavedon@sssup.it>, " \ "Markus Rechberger <mrechberger@gmail.com>, " \ - "Mauro Carvalho Chehab <mchehab@infradead.org>, " \ + "Mauro Carvalho Chehab <mchehab@kernel.org>, " \ "Sascha Sommer <saschasommer@freenet.de>" static unsigned int isoc_debug; diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h index 63c7c6124707..b0378e77ddff 100644 --- a/drivers/media/usb/em28xx/em28xx.h +++ b/drivers/media/usb/em28xx/em28xx.h @@ -4,7 +4,7 @@ * * Copyright (C) 2005 Markus Rechberger <mrechberger@gmail.com> * Ludovico Cavedon <cavedon@sssup.it> - * Mauro Carvalho Chehab <mchehab@infradead.org> + * Mauro Carvalho Chehab <mchehab@kernel.org> * Copyright (C) 2012 Frank Schäfer <fschaefer.oss@googlemail.com> * * Based on the em2800 driver from Sascha Sommer <saschasommer@freenet.de> diff --git a/drivers/media/usb/gspca/zc3xx-reg.h b/drivers/media/usb/gspca/zc3xx-reg.h index 
a1bd94e8ce52..71fda38e85e0 100644 --- a/drivers/media/usb/gspca/zc3xx-reg.h +++ b/drivers/media/usb/gspca/zc3xx-reg.h @@ -1,7 +1,7 @@ /* * zc030x registers * - * Copyright (c) 2008 Mauro Carvalho Chehab <mchehab@infradead.org> + * Copyright (c) 2008 Mauro Carvalho Chehab <mchehab@kernel.org> * * The register aliases used here came from this driver: * http://zc0302.sourceforge.net/zc0302.php diff --git a/drivers/media/usb/tm6000/tm6000-cards.c b/drivers/media/usb/tm6000/tm6000-cards.c index 70939e96b856..23df50aa0a4a 100644 --- a/drivers/media/usb/tm6000/tm6000-cards.c +++ b/drivers/media/usb/tm6000/tm6000-cards.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // tm6000-cards.c - driver for TM5600/TM6000/TM6010 USB video capture devices // -// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org> +// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org> #include <linux/init.h> #include <linux/module.h> diff --git a/drivers/media/usb/tm6000/tm6000-core.c b/drivers/media/usb/tm6000/tm6000-core.c index 23a1332d98e6..d3229aa45fcb 100644 --- a/drivers/media/usb/tm6000/tm6000-core.c +++ b/drivers/media/usb/tm6000/tm6000-core.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // tm6000-core.c - driver for TM5600/TM6000/TM6010 USB video capture devices // -// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org> +// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org> // // Copyright (c) 2007 Michel Ludwig <michel.ludwig@gmail.com> // - DVB-T support diff --git a/drivers/media/usb/tm6000/tm6000-i2c.c b/drivers/media/usb/tm6000/tm6000-i2c.c index c9a62bbff27a..659b63febf85 100644 --- a/drivers/media/usb/tm6000/tm6000-i2c.c +++ b/drivers/media/usb/tm6000/tm6000-i2c.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // tm6000-i2c.c - driver for TM5600/TM6000/TM6010 USB video capture devices // -// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org> +// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org> // // Copyright (c) 2007 Michel Ludwig <michel.ludwig@gmail.com> // - Fix SMBus Read Byte command diff --git a/drivers/media/usb/tm6000/tm6000-regs.h b/drivers/media/usb/tm6000/tm6000-regs.h index 21587fcf11e3..d10424673db9 100644 --- a/drivers/media/usb/tm6000/tm6000-regs.h +++ b/drivers/media/usb/tm6000/tm6000-regs.h @@ -2,7 +2,7 @@ * SPDX-License-Identifier: GPL-2.0 * tm6000-regs.h - driver for TM5600/TM6000/TM6010 USB video capture devices * - * Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org> + * Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org> */ /* diff --git a/drivers/media/usb/tm6000/tm6000-usb-isoc.h b/drivers/media/usb/tm6000/tm6000-usb-isoc.h index 5c615b0a7a46..b275dbce3a1b 100644 --- a/drivers/media/usb/tm6000/tm6000-usb-isoc.h +++ b/drivers/media/usb/tm6000/tm6000-usb-isoc.h @@ -2,7 +2,7 @@ * SPDX-License-Identifier: GPL-2.0 * tm6000-buf.c - driver for TM5600/TM6000/TM6010 USB video capture devices * - * Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org> + * Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org> */ #include <linux/videodev2.h> diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c index b2399d4266da..aa85fe31c835 100644 --- a/drivers/media/usb/tm6000/tm6000-video.c +++ b/drivers/media/usb/tm6000/tm6000-video.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // tm6000-video.c - driver for TM5600/TM6000/TM6010 USB video capture devices // -// Copyright (c) 
2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org> +// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org> // // Copyright (c) 2007 Michel Ludwig <michel.ludwig@gmail.com> // - Fixed module load/unload diff --git a/drivers/media/usb/tm6000/tm6000.h b/drivers/media/usb/tm6000/tm6000.h index e1e45770e28d..0864ed7314eb 100644 --- a/drivers/media/usb/tm6000/tm6000.h +++ b/drivers/media/usb/tm6000/tm6000.h @@ -2,7 +2,7 @@ * SPDX-License-Identifier: GPL-2.0 * tm6000.h - driver for TM5600/TM6000/TM6010 USB video capture devices * - * Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org> + * Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org> * * Copyright (c) 2007 Michel Ludwig <michel.ludwig@gmail.com> * - DVB-T support diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c index 1d0b2208e8fb..c080dcc75393 100644 --- a/drivers/media/v4l2-core/v4l2-dev.c +++ b/drivers/media/v4l2-core/v4l2-dev.c @@ -10,7 +10,7 @@ * 2 of the License, or (at your option) any later version. * * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk> (version 1) - * Mauro Carvalho Chehab <mchehab@infradead.org> (version 2) + * Mauro Carvalho Chehab <mchehab@kernel.org> (version 2) * * Fixes: 20000516 Claudio Matsuoka <claudio@conectiva.com> * - Added procfs support @@ -1072,7 +1072,7 @@ static void __exit videodev_exit(void) subsys_initcall(videodev_init); module_exit(videodev_exit) -MODULE_AUTHOR("Alan Cox, Mauro Carvalho Chehab <mchehab@infradead.org>"); +MODULE_AUTHOR("Alan Cox, Mauro Carvalho Chehab <mchehab@kernel.org>"); MODULE_DESCRIPTION("Device registrar for Video4Linux drivers v2"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV_MAJOR(VIDEO_MAJOR); diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index f48c505550e0..de5d96dbe69e 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -9,7 +9,7 @@ * 2 of the License, or (at your option) any later version. * * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk> (version 1) - * Mauro Carvalho Chehab <mchehab@infradead.org> (version 2) + * Mauro Carvalho Chehab <mchehab@kernel.org> (version 2) */ #include <linux/mm.h> diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c index 2b3981842b4b..7491b337002c 100644 --- a/drivers/media/v4l2-core/videobuf-core.c +++ b/drivers/media/v4l2-core/videobuf-core.c @@ -1,11 +1,11 @@ /* * generic helper functions for handling video4linux capture buffers * - * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org> + * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org> * * Highly based on video-buf written originally by: * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org> - * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org> + * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org> * (c) 2006 Ted Walther and John Sokol * * This program is free software; you can redistribute it and/or modify @@ -38,7 +38,7 @@ static int debug; module_param(debug, int, 0644); MODULE_DESCRIPTION("helper module to manage video4linux buffers"); -MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>"); +MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>"); MODULE_LICENSE("GPL"); #define dprintk(level, fmt, arg...) 
\ diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c index e02353e340dd..f46132504d88 100644 --- a/drivers/media/v4l2-core/videobuf-dma-contig.c +++ b/drivers/media/v4l2-core/videobuf-dma-contig.c @@ -7,7 +7,7 @@ * Copyright (c) 2008 Magnus Damm * * Based on videobuf-vmalloc.c, - * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org> + * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c index add2edb23eac..7770034aae28 100644 --- a/drivers/media/v4l2-core/videobuf-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf-dma-sg.c @@ -6,11 +6,11 @@ * into PAGE_SIZE chunks). They also assume the driver does not need * to touch the video data. * - * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org> + * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org> * * Highly based on video-buf written originally by: * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org> - * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org> + * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org> * (c) 2006 Ted Walther and John Sokol * * This program is free software; you can redistribute it and/or modify @@ -48,7 +48,7 @@ static int debug; module_param(debug, int, 0644); MODULE_DESCRIPTION("helper module to manage video4linux dma sg buffers"); -MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>"); +MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>"); MODULE_LICENSE("GPL"); #define dprintk(level, fmt, arg...) \ diff --git a/drivers/media/v4l2-core/videobuf-vmalloc.c b/drivers/media/v4l2-core/videobuf-vmalloc.c index 2ff7fcc77b11..45fe781aeeec 100644 --- a/drivers/media/v4l2-core/videobuf-vmalloc.c +++ b/drivers/media/v4l2-core/videobuf-vmalloc.c @@ -6,7 +6,7 @@ * into PAGE_SIZE chunks). They also assume the driver does not need * to touch the video data. * - * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org> + * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -41,7 +41,7 @@ static int debug; module_param(debug, int, 0644); MODULE_DESCRIPTION("helper module to manage video4linux vmalloc buffers"); -MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>"); +MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>"); MODULE_LICENSE("GPL"); #define dprintk(level, fmt, arg...) 
\ diff --git a/drivers/memory/emif-asm-offsets.c b/drivers/memory/emif-asm-offsets.c index 71a89d5d3efd..db8043019ec6 100644 --- a/drivers/memory/emif-asm-offsets.c +++ b/drivers/memory/emif-asm-offsets.c @@ -16,77 +16,7 @@ int main(void) { - DEFINE(EMIF_SDCFG_VAL_OFFSET, - offsetof(struct emif_regs_amx3, emif_sdcfg_val)); - DEFINE(EMIF_TIMING1_VAL_OFFSET, - offsetof(struct emif_regs_amx3, emif_timing1_val)); - DEFINE(EMIF_TIMING2_VAL_OFFSET, - offsetof(struct emif_regs_amx3, emif_timing2_val)); - DEFINE(EMIF_TIMING3_VAL_OFFSET, - offsetof(struct emif_regs_amx3, emif_timing3_val)); - DEFINE(EMIF_REF_CTRL_VAL_OFFSET, - offsetof(struct emif_regs_amx3, emif_ref_ctrl_val)); - DEFINE(EMIF_ZQCFG_VAL_OFFSET, - offsetof(struct emif_regs_amx3, emif_zqcfg_val)); - DEFINE(EMIF_PMCR_VAL_OFFSET, - offsetof(struct emif_regs_amx3, emif_pmcr_val)); - DEFINE(EMIF_PMCR_SHDW_VAL_OFFSET, - offsetof(struct emif_regs_amx3, emif_pmcr_shdw_val)); - DEFINE(EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET, - offsetof(struct emif_regs_amx3, emif_rd_wr_level_ramp_ctrl)); - DEFINE(EMIF_RD_WR_EXEC_THRESH_OFFSET, - offsetof(struct emif_regs_amx3, emif_rd_wr_exec_thresh)); - DEFINE(EMIF_COS_CONFIG_OFFSET, - offsetof(struct emif_regs_amx3, emif_cos_config)); - DEFINE(EMIF_PRIORITY_TO_COS_MAPPING_OFFSET, - offsetof(struct emif_regs_amx3, emif_priority_to_cos_mapping)); - DEFINE(EMIF_CONNECT_ID_SERV_1_MAP_OFFSET, - offsetof(struct emif_regs_amx3, emif_connect_id_serv_1_map)); - DEFINE(EMIF_CONNECT_ID_SERV_2_MAP_OFFSET, - offsetof(struct emif_regs_amx3, emif_connect_id_serv_2_map)); - DEFINE(EMIF_OCP_CONFIG_VAL_OFFSET, - offsetof(struct emif_regs_amx3, emif_ocp_config_val)); - DEFINE(EMIF_LPDDR2_NVM_TIM_OFFSET, - offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim)); - DEFINE(EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET, - offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim_shdw)); - DEFINE(EMIF_DLL_CALIB_CTRL_VAL_OFFSET, - offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val)); - DEFINE(EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET, - offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val_shdw)); - DEFINE(EMIF_DDR_PHY_CTLR_1_OFFSET, - offsetof(struct emif_regs_amx3, emif_ddr_phy_ctlr_1)); - DEFINE(EMIF_EXT_PHY_CTRL_VALS_OFFSET, - offsetof(struct emif_regs_amx3, emif_ext_phy_ctrl_vals)); - DEFINE(EMIF_REGS_AMX3_SIZE, sizeof(struct emif_regs_amx3)); - - BLANK(); - - DEFINE(EMIF_PM_BASE_ADDR_VIRT_OFFSET, - offsetof(struct ti_emif_pm_data, ti_emif_base_addr_virt)); - DEFINE(EMIF_PM_BASE_ADDR_PHYS_OFFSET, - offsetof(struct ti_emif_pm_data, ti_emif_base_addr_phys)); - DEFINE(EMIF_PM_CONFIG_OFFSET, - offsetof(struct ti_emif_pm_data, ti_emif_sram_config)); - DEFINE(EMIF_PM_REGS_VIRT_OFFSET, - offsetof(struct ti_emif_pm_data, regs_virt)); - DEFINE(EMIF_PM_REGS_PHYS_OFFSET, - offsetof(struct ti_emif_pm_data, regs_phys)); - DEFINE(EMIF_PM_DATA_SIZE, sizeof(struct ti_emif_pm_data)); - - BLANK(); - - DEFINE(EMIF_PM_SAVE_CONTEXT_OFFSET, - offsetof(struct ti_emif_pm_functions, save_context)); - DEFINE(EMIF_PM_RESTORE_CONTEXT_OFFSET, - offsetof(struct ti_emif_pm_functions, restore_context)); - DEFINE(EMIF_PM_ENTER_SR_OFFSET, - offsetof(struct ti_emif_pm_functions, enter_sr)); - DEFINE(EMIF_PM_EXIT_SR_OFFSET, - offsetof(struct ti_emif_pm_functions, exit_sr)); - DEFINE(EMIF_PM_ABORT_SR_OFFSET, - offsetof(struct ti_emif_pm_functions, abort_sr)); - DEFINE(EMIF_PM_FUNCTIONS_SIZE, sizeof(struct ti_emif_pm_functions)); + ti_emif_asm_offsets(); return 0; } diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index 
8e0acd197c43..6af946d16d24 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -9,6 +9,7 @@ * published by the Free Software Foundation. */ +#include <linux/bitops.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/io-64-nonatomic-hi-lo.h> @@ -62,6 +63,17 @@ * need a custom accessor. */ +static unsigned long global_flags; +/* + * Workaround to avoid the RX DMAC being used by multiple channels. + * On R-Car H3 ES1.* and M3-W ES1.0, when multiple SDHI channels use + * RX DMAC simultaneously, sometimes hundreds of bytes of data are not + * stored into the system memory even if the DMAC interrupt happened. + * So, this driver then uses one RX DMAC channel only. + */ +#define SDHI_INTERNAL_DMAC_ONE_RX_ONLY 0 +#define SDHI_INTERNAL_DMAC_RX_IN_USE 1 + /* Definitions for sampling clocks */ static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = { { @@ -126,6 +138,9 @@ renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host) { renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST, RST_RESERVED_BITS | val); + if (host->data && host->data->flags & MMC_DATA_READ) + clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags); + renesas_sdhi_internal_dmac_enable_dma(host, true); } @@ -155,6 +170,9 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host, if (data->flags & MMC_DATA_READ) { dtran_mode |= DTRAN_MODE_CH_NUM_CH1; dir = DMA_FROM_DEVICE; + if (test_bit(SDHI_INTERNAL_DMAC_ONE_RX_ONLY, &global_flags) && + test_and_set_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags)) + goto force_pio; } else { dtran_mode |= DTRAN_MODE_CH_NUM_CH0; dir = DMA_TO_DEVICE; @@ -208,6 +226,9 @@ static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg) renesas_sdhi_internal_dmac_enable_dma(host, false); dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->sg_len, dir); + if (dir == DMA_FROM_DEVICE) + clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags); + tmio_mmc_do_data_irq(host); out: spin_unlock_irq(&host->lock); @@ -251,18 +272,24 @@ static const struct tmio_mmc_dma_ops renesas_sdhi_internal_dmac_dma_ops = { * implementation as others may use a different implementation.
*/ static const struct soc_device_attribute gen3_soc_whitelist[] = { - { .soc_id = "r8a7795", .revision = "ES1.*" }, - { .soc_id = "r8a7795", .revision = "ES2.0" }, - { .soc_id = "r8a7796", .revision = "ES1.0" }, - { .soc_id = "r8a77995", .revision = "ES1.0" }, - { /* sentinel */ } + { .soc_id = "r8a7795", .revision = "ES1.*", + .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) }, + { .soc_id = "r8a7795", .revision = "ES2.0" }, + { .soc_id = "r8a7796", .revision = "ES1.0", + .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) }, + { .soc_id = "r8a77995", .revision = "ES1.0" }, + { /* sentinel */ } }; static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev) { - if (!soc_device_match(gen3_soc_whitelist)) + const struct soc_device_attribute *soc = soc_device_match(gen3_soc_whitelist); + + if (!soc) return -ENODEV; + global_flags |= (unsigned long)soc->data; + return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops); } diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 787434e5589d..78c25ad35fd2 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -1312,7 +1312,7 @@ static void amd_enable_manual_tuning(struct pci_dev *pdev) pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val); } -static int amd_execute_tuning(struct sdhci_host *host, u32 opcode) +static int amd_execute_tuning_hs200(struct sdhci_host *host, u32 opcode) { struct sdhci_pci_slot *slot = sdhci_priv(host); struct pci_dev *pdev = slot->chip->pdev; @@ -1351,6 +1351,27 @@ static int amd_execute_tuning(struct sdhci_host *host, u32 opcode) return 0; } +static int amd_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct sdhci_host *host = mmc_priv(mmc); + + /* AMD requires custom HS200 tuning */ + if (host->timing == MMC_TIMING_MMC_HS200) + return amd_execute_tuning_hs200(host, opcode); + + /* Otherwise perform standard SDHCI tuning */ + return sdhci_execute_tuning(mmc, opcode); +} + +static int amd_probe_slot(struct sdhci_pci_slot *slot) +{ + struct mmc_host_ops *ops = &slot->host->mmc_host_ops; + + ops->execute_tuning = amd_execute_tuning; + + return 0; +} + static int amd_probe(struct sdhci_pci_chip *chip) { struct pci_dev *smbus_dev; @@ -1385,12 +1406,12 @@ static const struct sdhci_ops amd_sdhci_pci_ops = { .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, - .platform_execute_tuning = amd_execute_tuning, }; static const struct sdhci_pci_fixes sdhci_amd = { .probe = amd_probe, .ops = &amd_sdhci_pci_ops, + .probe_slot = amd_probe_slot, }; static const struct pci_device_id pci_ids[] = { diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index d4c07b85f18e..f5695be14499 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c @@ -45,6 +45,7 @@ #define I82802AB 0x00ad #define I82802AC 0x00ac #define PF38F4476 0x881c +#define M28F00AP30 0x8963 /* STMicroelectronics chips */ #define M50LPW080 0x002F #define M50FLW080A 0x0080 @@ -375,6 +376,17 @@ static void cfi_fixup_major_minor(struct cfi_private *cfi, extp->MinorVersion = '1'; } +static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip) +{ + /* + * Micron (was Numonyx) 1Gbit bottom boot chips are buggy w.r.t. + * Erase Suspend for their small Erase Blocks (0x8000) + */ + if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30) + return 1; + return 0; +} + static inline struct cfi_pri_intelext * read_pri_intelext(struct map_info *map, __u16
adr) { @@ -831,21 +843,30 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1)))) goto sleep; + /* Do not allow suspend iff read/write to EB address */ + if ((adr & chip->in_progress_block_mask) == + chip->in_progress_block_addr) + goto sleep; + + /* do not suspend small EBs, buggy Micron Chips */ + if (cfi_is_micron_28F00AP30(cfi, chip) && + (chip->in_progress_block_mask == ~(0x8000-1))) + goto sleep; /* Erase suspend */ - map_write(map, CMD(0xB0), adr); + map_write(map, CMD(0xB0), chip->in_progress_block_addr); /* If the flash has finished erasing, then 'erase suspend' * appears to make some (28F320) flash devices switch to * 'read' mode. Make sure that we switch to 'read status' * mode so we get the right data. --rmk */ - map_write(map, CMD(0x70), adr); + map_write(map, CMD(0x70), chip->in_progress_block_addr); chip->oldstate = FL_ERASING; chip->state = FL_ERASE_SUSPENDING; chip->erase_suspended = 1; for (;;) { - status = map_read(map, adr); + status = map_read(map, chip->in_progress_block_addr); if (map_word_andequal(map, status, status_OK, status_OK)) break; @@ -1041,8 +1062,8 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad sending the 0x70 (Read Status) command to an erasing chip and expecting it to be ignored, that's what we do. */ - map_write(map, CMD(0xd0), adr); - map_write(map, CMD(0x70), adr); + map_write(map, CMD(0xd0), chip->in_progress_block_addr); + map_write(map, CMD(0x70), chip->in_progress_block_addr); chip->oldstate = FL_READY; chip->state = FL_ERASING; break; @@ -1933,6 +1954,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, map_write(map, CMD(0xD0), adr); chip->state = FL_ERASING; chip->erase_suspended = 0; + chip->in_progress_block_addr = adr; + chip->in_progress_block_mask = ~(len - 1); ret = INVAL_CACHE_AND_WAIT(map, chip, adr, adr, len, diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index 668e2cbc155b..692902df2598 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -816,9 +816,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr (mode == FL_WRITING && (cfip->EraseSuspend & 0x2)))) goto sleep; - /* We could check to see if we're trying to access the sector - * that is currently being erased. However, no user will try - * anything like that so we just wait for the timeout. 
*/ + /* Do not allow suspend iff read/write to EB address */ + if ((adr & chip->in_progress_block_mask) == + chip->in_progress_block_addr) + goto sleep; /* Erase suspend */ /* It's harmless to issue the Erase-Suspend and Erase-Resume @@ -2267,6 +2268,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) chip->state = FL_ERASING; chip->erase_suspended = 0; chip->in_progress_block_addr = adr; + chip->in_progress_block_mask = ~(map->size - 1); INVALIDATE_CACHE_UDELAY(map, chip, adr, map->size, @@ -2356,6 +2358,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, chip->state = FL_ERASING; chip->erase_suspended = 0; chip->in_progress_block_addr = adr; + chip->in_progress_block_mask = ~(len - 1); INVALIDATE_CACHE_UDELAY(map, chip, adr, len, diff --git a/drivers/mtd/nand/core.c b/drivers/mtd/nand/core.c index d0cd6f8635d7..9c9f8936b63b 100644 --- a/drivers/mtd/nand/core.c +++ b/drivers/mtd/nand/core.c @@ -162,7 +162,6 @@ int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo) ret = nanddev_erase(nand, &pos); if (ret) { einfo->fail_addr = nanddev_pos_to_offs(nand, &pos); - einfo->state = MTD_ERASE_FAILED; return ret; } @@ -170,8 +169,6 @@ int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo) nanddev_pos_next_eraseblock(nand, &pos); } - einfo->state = MTD_ERASE_DONE; - return 0; } EXPORT_SYMBOL_GPL(nanddev_mtd_erase); diff --git a/drivers/mtd/nand/onenand/omap2.c b/drivers/mtd/nand/onenand/omap2.c index 9c159f0dd9a6..321137158ff3 100644 --- a/drivers/mtd/nand/onenand/omap2.c +++ b/drivers/mtd/nand/onenand/omap2.c @@ -375,56 +375,42 @@ static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area, { struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd); struct onenand_chip *this = mtd->priv; - dma_addr_t dma_src, dma_dst; - int bram_offset; + struct device *dev = &c->pdev->dev; void *buf = (void *)buffer; + dma_addr_t dma_src, dma_dst; + int bram_offset, err; size_t xtra; - int ret; bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset; - if (bram_offset & 3 || (size_t)buf & 3 || count < 384) - goto out_copy; - - /* panic_write() may be in an interrupt context */ - if (in_interrupt() || oops_in_progress) + /* + * If the buffer address is not DMA-able, len is not long enough to make + * DMA transfers profitable, or panic_write() may be in an interrupt + * context, fall back to PIO mode.
+ */ + if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 || + count < 384 || in_interrupt() || oops_in_progress ) goto out_copy; - if (buf >= high_memory) { - struct page *p1; - - if (((size_t)buf & PAGE_MASK) != - ((size_t)(buf + count - 1) & PAGE_MASK)) - goto out_copy; - p1 = vmalloc_to_page(buf); - if (!p1) - goto out_copy; - buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK); - } - xtra = count & 3; if (xtra) { count -= xtra; memcpy(buf + count, this->base + bram_offset + count, xtra); } + dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE); dma_src = c->phys_base + bram_offset; - dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE); - if (dma_mapping_error(&c->pdev->dev, dma_dst)) { - dev_err(&c->pdev->dev, - "Couldn't DMA map a %d byte buffer\n", - count); - goto out_copy; - } - ret = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count); - dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE); - - if (ret) { - dev_err(&c->pdev->dev, "timeout waiting for DMA\n"); + if (dma_mapping_error(dev, dma_dst)) { + dev_err(dev, "Couldn't DMA map a %d byte buffer\n", count); goto out_copy; } - return 0; + err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count); + dma_unmap_single(dev, dma_dst, count, DMA_FROM_DEVICE); + if (!err) + return 0; + + dev_err(dev, "timeout waiting for DMA\n"); out_copy: memcpy(buf, this->base + bram_offset, count); @@ -437,49 +423,34 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area, { struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd); struct onenand_chip *this = mtd->priv; - dma_addr_t dma_src, dma_dst; - int bram_offset; + struct device *dev = &c->pdev->dev; void *buf = (void *)buffer; - int ret; + dma_addr_t dma_src, dma_dst; + int bram_offset, err; bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset; - if (bram_offset & 3 || (size_t)buf & 3 || count < 384) - goto out_copy; - - /* panic_write() may be in an interrupt context */ - if (in_interrupt() || oops_in_progress) + /* + * If the buffer address is not DMA-able, len is not long enough to make + * DMA transfers profitable, or panic_write() may be in an interrupt + * context, fall back to PIO mode.
+ */ + if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 || + count < 384 || in_interrupt() || oops_in_progress ) goto out_copy; - if (buf >= high_memory) { - struct page *p1; - - if (((size_t)buf & PAGE_MASK) != - ((size_t)(buf + count - 1) & PAGE_MASK)) - goto out_copy; - p1 = vmalloc_to_page(buf); - if (!p1) - goto out_copy; - buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK); - } - - dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE); + dma_src = dma_map_single(dev, buf, count, DMA_TO_DEVICE); dma_dst = c->phys_base + bram_offset; - if (dma_mapping_error(&c->pdev->dev, dma_src)) { - dev_err(&c->pdev->dev, - "Couldn't DMA map a %d byte buffer\n", - count); - return -1; - } - - ret = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count); - dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE); - - if (ret) { - dev_err(&c->pdev->dev, "timeout waiting for DMA\n"); + if (dma_mapping_error(dev, dma_src)) { + dev_err(dev, "Couldn't DMA map a %d byte buffer\n", count); goto out_copy; } - return 0; + err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count); + dma_unmap_page(dev, dma_src, count, DMA_TO_DEVICE); + if (!err) + return 0; + + dev_err(dev, "timeout waiting for DMA\n"); out_copy: memcpy(this->base + bram_offset, buf, count); diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index 10e953218948..db5ec4e8bde9 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -1074,7 +1074,7 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip, return ret; ret = marvell_nfc_wait_op(chip, - chip->data_interface.timings.sdr.tPROG_max); + PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max)); return ret; } @@ -1408,6 +1408,7 @@ marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk, struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip); struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout; + u32 xtype; int ret; struct marvell_nfc_op nfc_op = { .ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) | NDCB0_LEN_OVRD, @@ -1423,7 +1424,12 @@ marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk, * last naked write. */ if (chunk == 0) { - nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_WRITE_DISPATCH) | + if (lt->nchunks == 1) + xtype = XTYPE_MONOLITHIC_RW; + else + xtype = XTYPE_WRITE_DISPATCH; + + nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(xtype) | NDCB0_ADDR_CYC(marvell_nand->addr_cyc) | NDCB0_CMD1(NAND_CMD_SEQIN); nfc_op.ndcb[1] |= NDCB1_ADDRS_PAGE(page); @@ -1494,7 +1500,7 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct mtd_info *mtd, } ret = marvell_nfc_wait_op(chip, - chip->data_interface.timings.sdr.tPROG_max); + PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max)); marvell_nfc_disable_hw_ecc(chip); @@ -2299,29 +2305,20 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc, /* * The legacy "num-cs" property indicates the number of CS on the only * chip connected to the controller (legacy bindings does not support - * more than one chip). CS are only incremented one by one while the RB - * pin is always the #0. + * more than one chip). The CS and RB pins are always the #0. * * When not using legacy bindings, a couple of "reg" and "nand-rb" * properties must be filled. For each chip, expressed as a subnode, * "reg" points to the CS lines and "nand-rb" to the RB line. 
*/ - if (pdata) { + if (pdata || nfc->caps->legacy_of_bindings) { nsels = 1; - } else if (nfc->caps->legacy_of_bindings && - !of_get_property(np, "num-cs", &nsels)) { - dev_err(dev, "missing num-cs property\n"); - return -EINVAL; - } else if (!of_get_property(np, "reg", &nsels)) { - dev_err(dev, "missing reg property\n"); - return -EINVAL; - } - - if (!pdata) - nsels /= sizeof(u32); - if (!nsels) { - dev_err(dev, "invalid reg property size\n"); - return -EINVAL; + } else { + nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32)); + if (nsels <= 0) { + dev_err(dev, "missing/invalid reg property\n"); + return -EINVAL; + } } /* Alloc the nand chip structure */ diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 72f3a89da513..f28c3a555861 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -706,12 +706,17 @@ static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo) */ int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms) { + const struct nand_sdr_timings *timings; u8 status = 0; int ret; if (!chip->exec_op) return -ENOTSUPP; + /* Wait tWB before polling the STATUS reg. */ + timings = nand_get_sdr_timings(&chip->data_interface); + ndelay(PSEC_TO_NSEC(timings->tWB_max)); + ret = nand_status_op(chip, NULL); if (ret) return ret; diff --git a/drivers/mtd/nand/raw/tango_nand.c b/drivers/mtd/nand/raw/tango_nand.c index f54518ffb36a..f2052fae21c7 100644 --- a/drivers/mtd/nand/raw/tango_nand.c +++ b/drivers/mtd/nand/raw/tango_nand.c @@ -645,7 +645,7 @@ static int tango_nand_probe(struct platform_device *pdev) writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE); - clk = clk_get(&pdev->dev, NULL); + clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) return PTR_ERR(clk); diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c index 4b8e9183489a..5872f31eaa60 100644 --- a/drivers/mtd/spi-nor/cadence-quadspi.c +++ b/drivers/mtd/spi-nor/cadence-quadspi.c @@ -501,7 +501,9 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf, void __iomem *reg_base = cqspi->iobase; void __iomem *ahb_base = cqspi->ahb_base; unsigned int remaining = n_rx; + unsigned int mod_bytes = n_rx % 4; unsigned int bytes_to_read = 0; + u8 *rxbuf_end = rxbuf + n_rx; int ret = 0; writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR); @@ -530,11 +532,24 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf, } while (bytes_to_read != 0) { + unsigned int word_remain = round_down(remaining, 4); + bytes_to_read *= cqspi->fifo_width; bytes_to_read = bytes_to_read > remaining ? 
remaining : bytes_to_read; - ioread32_rep(ahb_base, rxbuf, - DIV_ROUND_UP(bytes_to_read, 4)); + bytes_to_read = round_down(bytes_to_read, 4); + /* Read 4 byte word chunks then single bytes */ + if (bytes_to_read) { + ioread32_rep(ahb_base, rxbuf, + (bytes_to_read / 4)); + } else if (!word_remain && mod_bytes) { + unsigned int temp = ioread32(ahb_base); + + bytes_to_read = mod_bytes; + memcpy(rxbuf, &temp, min((unsigned int) + (rxbuf_end - rxbuf), + bytes_to_read)); + } rxbuf += bytes_to_read; remaining -= bytes_to_read; bytes_to_read = cqspi_get_rd_sram_level(cqspi); diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 1ed9529e7bd1..5eb0df2e5464 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -450,7 +450,7 @@ static void rlb_update_client(struct rlb_client_info *client_info) { int i; - if (!client_info->slave) + if (!client_info->slave || !is_valid_ether_addr(client_info->mac_dst)) return; for (i = 0; i < RLB_ARP_BURST_SIZE; i++) { @@ -943,6 +943,10 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[], skb->priority = TC_PRIO_CONTROL; skb->dev = slave->dev; + netdev_dbg(slave->bond->dev, + "Send learning packet: dev %s mac %pM vlan %d\n", + slave->dev->name, mac_addr, vid); + if (vid) __vlan_hwaccel_put_tag(skb, vlan_proto, vid); @@ -965,14 +969,13 @@ static int alb_upper_dev_walk(struct net_device *upper, void *_data) u8 *mac_addr = data->mac_addr; struct bond_vlan_tag *tags; - if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) { - if (strict_match && - ether_addr_equal_64bits(mac_addr, - upper->dev_addr)) { + if (is_vlan_dev(upper) && + bond->nest_level == vlan_get_encap_level(upper) - 1) { + if (upper->addr_assign_type == NET_ADDR_STOLEN) { alb_send_lp_vid(slave, mac_addr, vlan_dev_vlan_proto(upper), vlan_dev_vlan_id(upper)); - } else if (!strict_match) { + } else { alb_send_lp_vid(slave, upper->dev_addr, vlan_dev_vlan_proto(upper), vlan_dev_vlan_id(upper)); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b7b113018853..1f1e97b26f95 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1660,8 +1660,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, } /* switch(bond_mode) */ #ifdef CONFIG_NET_POLL_CONTROLLER - slave_dev->npinfo = bond->dev->npinfo; - if (slave_dev->npinfo) { + if (bond->dev->npinfo) { if (slave_enable_netpoll(new_slave)) { netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n"); res = -EBUSY; @@ -1739,6 +1738,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, if (bond_mode_uses_xmit_hash(bond)) bond_update_slave_arr(bond, NULL); + bond->nest_level = dev_get_nest_level(bond_dev); + netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n", slave_dev->name, bond_is_active_slave(new_slave) ? 
"an active" : "a backup", diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index b1779566c5bb..3c71f1cb205f 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -605,7 +605,7 @@ void can_bus_off(struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); - netdev_dbg(dev, "bus-off\n"); + netdev_info(dev, "bus-off\n"); netif_carrier_off(dev); diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 634c51e6b8ae..d53a45bf2a72 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -200,6 +200,7 @@ #define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disable Memory error detection */ #define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */ #define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6) /* No interrupt for error passive */ +#define FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN BIT(7) /* default to BE register access */ /* Structure of the message buffer */ struct flexcan_mb { @@ -288,6 +289,12 @@ struct flexcan_priv { static const struct flexcan_devtype_data fsl_p1010_devtype_data = { .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE | + FLEXCAN_QUIRK_BROKEN_PERR_STATE | + FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN, +}; + +static const struct flexcan_devtype_data fsl_imx25_devtype_data = { + .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE | FLEXCAN_QUIRK_BROKEN_PERR_STATE, }; @@ -1251,9 +1258,9 @@ static void unregister_flexcandev(struct net_device *dev) static const struct of_device_id flexcan_of_match[] = { { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, }, { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, }, - { .compatible = "fsl,imx53-flexcan", .data = &fsl_p1010_devtype_data, }, - { .compatible = "fsl,imx35-flexcan", .data = &fsl_p1010_devtype_data, }, - { .compatible = "fsl,imx25-flexcan", .data = &fsl_p1010_devtype_data, }, + { .compatible = "fsl,imx53-flexcan", .data = &fsl_imx25_devtype_data, }, + { .compatible = "fsl,imx35-flexcan", .data = &fsl_imx25_devtype_data, }, + { .compatible = "fsl,imx25-flexcan", .data = &fsl_imx25_devtype_data, }, { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, }, { .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, }, { .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, }, @@ -1337,18 +1344,13 @@ static int flexcan_probe(struct platform_device *pdev) priv = netdev_priv(dev); - if (of_property_read_bool(pdev->dev.of_node, "big-endian")) { + if (of_property_read_bool(pdev->dev.of_node, "big-endian") || + devtype_data->quirks & FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN) { priv->read = flexcan_read_be; priv->write = flexcan_write_be; } else { - if (of_device_is_compatible(pdev->dev.of_node, - "fsl,p1010-flexcan")) { - priv->read = flexcan_read_be; - priv->write = flexcan_write_be; - } else { - priv->read = flexcan_read_le; - priv->write = flexcan_write_le; - } + priv->read = flexcan_read_le; + priv->write = flexcan_write_le; } priv->can.clock.freq = clock_freq; diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c index 5590c559a8ca..53e320c92a8b 100644 --- a/drivers/net/can/spi/hi311x.c +++ b/drivers/net/can/spi/hi311x.c @@ -91,6 +91,7 @@ #define HI3110_STAT_BUSOFF BIT(2) #define HI3110_STAT_ERRP BIT(3) #define HI3110_STAT_ERRW BIT(4) +#define HI3110_STAT_TXMTY BIT(7) #define HI3110_BTR0_SJW_SHIFT 6 #define HI3110_BTR0_BRP_SHIFT 0 @@ -427,8 +428,10 @@ static int hi3110_get_berr_counter(const struct net_device *net, struct hi3110_priv *priv = netdev_priv(net); struct spi_device *spi = priv->spi; + 
mutex_lock(&priv->hi3110_lock); bec->txerr = hi3110_read(spi, HI3110_READ_TEC); bec->rxerr = hi3110_read(spi, HI3110_READ_REC); + mutex_unlock(&priv->hi3110_lock); return 0; } @@ -735,10 +738,7 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id) } } - if (intf == 0) - break; - - if (intf & HI3110_INT_TXCPLT) { + if (priv->tx_len && statf & HI3110_STAT_TXMTY) { net->stats.tx_packets++; net->stats.tx_bytes += priv->tx_len - 1; can_led_event(net, CAN_LED_EVENT_TX); @@ -748,6 +748,9 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id) } netif_wake_queue(net); } + + if (intf == 0) + break; } mutex_unlock(&priv->hi3110_lock); return IRQ_HANDLED; diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 63587b8e6825..daed57d3d209 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -1179,7 +1179,7 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev, skb = alloc_can_skb(priv->netdev, &cf); if (!skb) { - stats->tx_dropped++; + stats->rx_dropped++; return; } diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 3d2091099f7f..5b4374f21d76 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3370,6 +3370,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_internal_phys = 5, .max_vid = 4095, .port_base_addr = 0x10, + .phy_base_addr = 0x0, .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 15000, @@ -3391,6 +3392,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_internal_phys = 0, .max_vid = 4095, .port_base_addr = 0x10, + .phy_base_addr = 0x0, .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 15000, @@ -3410,6 +3412,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_internal_phys = 8, .max_vid = 4095, .port_base_addr = 0x10, + .phy_base_addr = 0x0, .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 15000, @@ -3431,6 +3434,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_internal_phys = 5, .max_vid = 4095, .port_base_addr = 0x10, + .phy_base_addr = 0x0, .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 15000, @@ -3452,6 +3456,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_internal_phys = 0, .max_vid = 4095, .port_base_addr = 0x10, + .phy_base_addr = 0x0, .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 15000, @@ -3472,6 +3477,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_gpio = 11, .max_vid = 4095, .port_base_addr = 0x10, + .phy_base_addr = 0x10, .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 3750, @@ -3493,6 +3499,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_internal_phys = 5, .max_vid = 4095, .port_base_addr = 0x10, + .phy_base_addr = 0x0, .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 15000, @@ -3514,6 +3521,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_internal_phys = 0, .max_vid = 4095, .port_base_addr = 0x10, + .phy_base_addr = 0x0, .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 15000, @@ -3535,6 +3543,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_internal_phys = 5, .max_vid = 4095, .port_base_addr = 0x10, + .phy_base_addr = 0x0, .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 15000, @@ -3557,6 +3566,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_gpio = 15, .max_vid = 4095, .port_base_addr = 0x10, + .phy_base_addr = 0x0, .global1_addr 
= 0x1b, .global2_addr = 0x1c, .age_time_coeff = 15000, @@ -3578,6 +3588,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_internal_phys = 5, .max_vid = 4095, .port_base_addr = 0x10, + .phy_base_addr = 0x0, .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 15000, @@ -3600,6 +3611,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_gpio = 15, .max_vid = 4095, .port_base_addr = 0x10, + .phy_base_addr = 0x0, .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 15000, @@ -3621,6 +3633,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_internal_phys = 0, .max_vid = 4095, .port_base_addr = 0x10, + .phy_base_addr = 0x0, .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 15000, @@ -3641,6 +3654,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_gpio = 16, .max_vid = 8191, .port_base_addr = 0x0, + .phy_base_addr = 0x0, .global1_addr = 0x1b, .global2_addr = 0x1c, .tag_protocol = DSA_TAG_PROTO_DSA, @@ -3663,6 +3677,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_gpio = 16, .max_vid = 8191, .port_base_addr = 0x0, + .phy_base_addr = 0x0, .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 3750, @@ -3684,6 +3699,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_internal_phys = 11, .max_vid = 8191, .port_base_addr = 0x0, + .phy_base_addr = 0x0, .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 3750, @@ -3707,6 +3723,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_gpio = 15, .max_vid = 4095, .port_base_addr = 0x10, + .phy_base_addr = 0x0, .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 15000, @@ -3730,6 +3747,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_gpio = 16, .max_vid = 8191, .port_base_addr = 0x0, + .phy_base_addr = 0x0, .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 3750, @@ -3753,6 +3771,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_gpio = 15, .max_vid = 4095, .port_base_addr = 0x10, + .phy_base_addr = 0x0, .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 15000, @@ -3776,6 +3795,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_gpio = 15, .max_vid = 4095, .port_base_addr = 0x10, + .phy_base_addr = 0x0, .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 15000, @@ -3798,6 +3818,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_gpio = 11, .max_vid = 4095, .port_base_addr = 0x10, + .phy_base_addr = 0x10, .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 3750, @@ -3820,6 +3841,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_internal_phys = 5, .max_vid = 4095, .port_base_addr = 0x10, + .phy_base_addr = 0x0, .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 15000, @@ -3841,6 +3863,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_internal_phys = 5, .max_vid = 4095, .port_base_addr = 0x10, + .phy_base_addr = 0x0, .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 15000, @@ -3863,6 +3886,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_gpio = 15, .max_vid = 4095, .port_base_addr = 0x10, + .phy_base_addr = 0x0, .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 15000, @@ -3885,6 +3909,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_gpio = 16, .max_vid = 8191, .port_base_addr = 0x0, + .phy_base_addr = 0x0, .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 3750, @@ -3907,6 +3932,7 @@ 
static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_gpio = 16, .max_vid = 8191, .port_base_addr = 0x0, + .phy_base_addr = 0x0, .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 3750, diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 80490f66bc06..12b7f4649b25 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -114,6 +114,7 @@ struct mv88e6xxx_info { unsigned int num_gpio; unsigned int max_vid; unsigned int port_base_addr; + unsigned int phy_base_addr; unsigned int global1_addr; unsigned int global2_addr; unsigned int age_time_coeff; diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index 0ce627fded48..8d22d66d84b7 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -1118,7 +1118,7 @@ int mv88e6xxx_g2_irq_mdio_setup(struct mv88e6xxx_chip *chip, err = irq; goto out; } - bus->irq[chip->info->port_base_addr + phy] = irq; + bus->irq[chip->info->phy_base_addr + phy] = irq; } return 0; out: diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c index ac7694c71266..a036c490b7ce 100644 --- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c +++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c @@ -285,10 +285,18 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip, struct sk_buff_head *rxq) { u16 buf[4] = { 0 }, status, seq_id; - u64 ns, timelo, timehi; struct skb_shared_hwtstamps *shwt; + struct sk_buff_head received; + u64 ns, timelo, timehi; + unsigned long flags; int err; + /* The latched timestamp belongs to one of the received frames. */ + __skb_queue_head_init(&received); + spin_lock_irqsave(&rxq->lock, flags); + skb_queue_splice_tail_init(rxq, &received); + spin_unlock_irqrestore(&rxq->lock, flags); + mutex_lock(&chip->reg_lock); err = mv88e6xxx_port_ptp_read(chip, ps->port_id, reg, buf, ARRAY_SIZE(buf)); @@ -311,7 +319,7 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip, /* Since the device can only handle one time stamp at a time, * we purge any extra frames from the queue. 
*/ - for ( ; skb; skb = skb_dequeue(rxq)) { + for ( ; skb; skb = __skb_dequeue(&received)) { if (mv88e6xxx_ts_valid(status) && seq_match(skb, seq_id)) { ns = timehi << 16 | timelo; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index 7ea72ef11a55..d272dc6984ac 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -1321,6 +1321,10 @@ #define MDIO_VEND2_AN_STAT 0x8002 #endif +#ifndef MDIO_VEND2_PMA_CDR_CONTROL +#define MDIO_VEND2_PMA_CDR_CONTROL 0x8056 +#endif + #ifndef MDIO_CTRL1_SPEED1G #define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100) #endif @@ -1369,6 +1373,10 @@ #define XGBE_AN_CL37_TX_CONFIG_MASK 0x08 #define XGBE_AN_CL37_MII_CTRL_8BIT 0x0100 +#define XGBE_PMA_CDR_TRACK_EN_MASK 0x01 +#define XGBE_PMA_CDR_TRACK_EN_OFF 0x00 +#define XGBE_PMA_CDR_TRACK_EN_ON 0x01 + /* Bit setting and getting macros * The get macro will extract the current bit field value from within * the variable diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c index 7d128be61310..b91143947ed2 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c @@ -519,6 +519,22 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata) "debugfs_create_file failed\n"); } + if (pdata->vdata->an_cdr_workaround) { + pfile = debugfs_create_bool("an_cdr_workaround", 0600, + pdata->xgbe_debugfs, + &pdata->debugfs_an_cdr_workaround); + if (!pfile) + netdev_err(pdata->netdev, + "debugfs_create_bool failed\n"); + + pfile = debugfs_create_bool("an_cdr_track_early", 0600, + pdata->xgbe_debugfs, + &pdata->debugfs_an_cdr_track_early); + if (!pfile) + netdev_err(pdata->netdev, + "debugfs_create_bool failed\n"); + } + kfree(buf); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index 795e556d4a3f..441d0973957b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -349,6 +349,7 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata) XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1); /* Call MDIO/PHY initialization routine */ + pdata->debugfs_an_cdr_workaround = pdata->vdata->an_cdr_workaround; ret = pdata->phy_if.phy_init(pdata); if (ret) return ret; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 072b9f664597..1b45cd73a258 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -432,11 +432,16 @@ static void xgbe_an73_disable(struct xgbe_prv_data *pdata) xgbe_an73_set(pdata, false, false); xgbe_an73_disable_interrupts(pdata); + pdata->an_start = 0; + netif_dbg(pdata, link, pdata->netdev, "CL73 AN disabled\n"); } static void xgbe_an_restart(struct xgbe_prv_data *pdata) { + if (pdata->phy_if.phy_impl.an_pre) + pdata->phy_if.phy_impl.an_pre(pdata); + switch (pdata->an_mode) { case XGBE_AN_MODE_CL73: case XGBE_AN_MODE_CL73_REDRV: @@ -453,6 +458,9 @@ static void xgbe_an_restart(struct xgbe_prv_data *pdata) static void xgbe_an_disable(struct xgbe_prv_data *pdata) { + if (pdata->phy_if.phy_impl.an_post) + pdata->phy_if.phy_impl.an_post(pdata); + switch (pdata->an_mode) { case XGBE_AN_MODE_CL73: case XGBE_AN_MODE_CL73_REDRV: @@ -505,11 +513,11 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata, XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); - if 
(pdata->phy_if.phy_impl.kr_training_post) - pdata->phy_if.phy_impl.kr_training_post(pdata); - netif_dbg(pdata, link, pdata->netdev, "KR training initiated\n"); + + if (pdata->phy_if.phy_impl.kr_training_post) + pdata->phy_if.phy_impl.kr_training_post(pdata); } return XGBE_AN_PAGE_RECEIVED; @@ -637,11 +645,11 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata) return XGBE_AN_NO_LINK; } - xgbe_an73_disable(pdata); + xgbe_an_disable(pdata); xgbe_switch_mode(pdata); - xgbe_an73_restart(pdata); + xgbe_an_restart(pdata); return XGBE_AN_INCOMPAT_LINK; } @@ -820,6 +828,9 @@ static void xgbe_an37_state_machine(struct xgbe_prv_data *pdata) pdata->an_result = pdata->an_state; pdata->an_state = XGBE_AN_READY; + if (pdata->phy_if.phy_impl.an_post) + pdata->phy_if.phy_impl.an_post(pdata); + netif_dbg(pdata, link, pdata->netdev, "CL37 AN result: %s\n", xgbe_state_as_string(pdata->an_result)); } @@ -903,6 +914,9 @@ again: pdata->kx_state = XGBE_RX_BPA; pdata->an_start = 0; + if (pdata->phy_if.phy_impl.an_post) + pdata->phy_if.phy_impl.an_post(pdata); + netif_dbg(pdata, link, pdata->netdev, "CL73 AN result: %s\n", xgbe_state_as_string(pdata->an_result)); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c index eb23f9ba1a9a..82d1f416ee2a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c @@ -456,6 +456,7 @@ static const struct xgbe_version_data xgbe_v2a = { .irq_reissue_support = 1, .tx_desc_prefetch = 5, .rx_desc_prefetch = 5, + .an_cdr_workaround = 1, }; static const struct xgbe_version_data xgbe_v2b = { @@ -470,6 +471,7 @@ static const struct xgbe_version_data xgbe_v2b = { .irq_reissue_support = 1, .tx_desc_prefetch = 5, .rx_desc_prefetch = 5, + .an_cdr_workaround = 1, }; static const struct pci_device_id xgbe_pci_table[] = { diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 3304a291aa96..aac884314000 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -147,6 +147,14 @@ /* Rate-change complete wait/retry count */ #define XGBE_RATECHANGE_COUNT 500 +/* CDR delay values for KR support (in usec) */ +#define XGBE_CDR_DELAY_INIT 10000 +#define XGBE_CDR_DELAY_INC 10000 +#define XGBE_CDR_DELAY_MAX 100000 + +/* RRC frequency during link status check */ +#define XGBE_RRC_FREQUENCY 10 + enum xgbe_port_mode { XGBE_PORT_MODE_RSVD = 0, XGBE_PORT_MODE_BACKPLANE, @@ -245,6 +253,10 @@ enum xgbe_sfp_speed { #define XGBE_SFP_BASE_VENDOR_SN 4 #define XGBE_SFP_BASE_VENDOR_SN_LEN 16 +#define XGBE_SFP_EXTD_OPT1 1 +#define XGBE_SFP_EXTD_OPT1_RX_LOS BIT(1) +#define XGBE_SFP_EXTD_OPT1_TX_FAULT BIT(3) + #define XGBE_SFP_EXTD_DIAG 28 #define XGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2) @@ -324,6 +336,7 @@ struct xgbe_phy_data { unsigned int sfp_gpio_address; unsigned int sfp_gpio_mask; + unsigned int sfp_gpio_inputs; unsigned int sfp_gpio_rx_los; unsigned int sfp_gpio_tx_fault; unsigned int sfp_gpio_mod_absent; @@ -355,6 +368,10 @@ struct xgbe_phy_data { unsigned int redrv_addr; unsigned int redrv_lane; unsigned int redrv_model; + + /* KR AN support */ + unsigned int phy_cdr_notrack; + unsigned int phy_cdr_delay; }; /* I2C, MDIO and GPIO lines are muxed, so only one device at a time */ @@ -974,6 +991,49 @@ static void xgbe_phy_sfp_external_phy(struct xgbe_prv_data *pdata) phy_data->sfp_phy_avail = 1; } +static bool xgbe_phy_check_sfp_rx_los(struct xgbe_phy_data *phy_data) +{ + u8 *sfp_extd = 
phy_data->sfp_eeprom.extd; + + if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_RX_LOS)) + return false; + + if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS) + return false; + + if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_rx_los)) + return true; + + return false; +} + +static bool xgbe_phy_check_sfp_tx_fault(struct xgbe_phy_data *phy_data) +{ + u8 *sfp_extd = phy_data->sfp_eeprom.extd; + + if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_TX_FAULT)) + return false; + + if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT) + return false; + + if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_tx_fault)) + return true; + + return false; +} + +static bool xgbe_phy_check_sfp_mod_absent(struct xgbe_phy_data *phy_data) +{ + if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT) + return false; + + if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_mod_absent)) + return true; + + return false; +} + static bool xgbe_phy_belfuse_parse_quirks(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; @@ -1019,6 +1079,10 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata) if (sfp_base[XGBE_SFP_BASE_EXT_ID] != XGBE_SFP_EXT_ID_SFP) return; + /* Update transceiver signals (eeprom extd/options) */ + phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data); + phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data); + if (xgbe_phy_sfp_parse_quirks(pdata)) return; @@ -1184,7 +1248,6 @@ put: static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; - unsigned int gpio_input; u8 gpio_reg, gpio_ports[2]; int ret; @@ -1199,23 +1262,9 @@ static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata) return; } - gpio_input = (gpio_ports[1] << 8) | gpio_ports[0]; - - if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT) { - /* No GPIO, just assume the module is present for now */ - phy_data->sfp_mod_absent = 0; - } else { - if (!(gpio_input & (1 << phy_data->sfp_gpio_mod_absent))) - phy_data->sfp_mod_absent = 0; - } - - if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS) && - (gpio_input & (1 << phy_data->sfp_gpio_rx_los))) - phy_data->sfp_rx_los = 1; + phy_data->sfp_gpio_inputs = (gpio_ports[1] << 8) | gpio_ports[0]; - if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT) && - (gpio_input & (1 << phy_data->sfp_gpio_tx_fault))) - phy_data->sfp_tx_fault = 1; + phy_data->sfp_mod_absent = xgbe_phy_check_sfp_mod_absent(phy_data); } static void xgbe_phy_sfp_mod_absent(struct xgbe_prv_data *pdata) @@ -2361,7 +2410,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart) return 1; /* No link, attempt a receiver reset cycle */ - if (phy_data->rrc_count++) { + if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) { phy_data->rrc_count = 0; xgbe_phy_rrc(pdata); } @@ -2669,6 +2718,103 @@ static bool xgbe_phy_port_enabled(struct xgbe_prv_data *pdata) return true; } +static void xgbe_phy_cdr_track(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + if (!pdata->debugfs_an_cdr_workaround) + return; + + if (!phy_data->phy_cdr_notrack) + return; + + usleep_range(phy_data->phy_cdr_delay, + phy_data->phy_cdr_delay + 500); + + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL, + XGBE_PMA_CDR_TRACK_EN_MASK, + XGBE_PMA_CDR_TRACK_EN_ON); + + phy_data->phy_cdr_notrack = 0; +} + +static void xgbe_phy_cdr_notrack(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + if 
(!pdata->debugfs_an_cdr_workaround) + return; + + if (phy_data->phy_cdr_notrack) + return; + + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL, + XGBE_PMA_CDR_TRACK_EN_MASK, + XGBE_PMA_CDR_TRACK_EN_OFF); + + xgbe_phy_rrc(pdata); + + phy_data->phy_cdr_notrack = 1; +} + +static void xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata) +{ + if (!pdata->debugfs_an_cdr_track_early) + xgbe_phy_cdr_track(pdata); +} + +static void xgbe_phy_kr_training_pre(struct xgbe_prv_data *pdata) +{ + if (pdata->debugfs_an_cdr_track_early) + xgbe_phy_cdr_track(pdata); +} + +static void xgbe_phy_an_post(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + switch (pdata->an_mode) { + case XGBE_AN_MODE_CL73: + case XGBE_AN_MODE_CL73_REDRV: + if (phy_data->cur_mode != XGBE_MODE_KR) + break; + + xgbe_phy_cdr_track(pdata); + + switch (pdata->an_result) { + case XGBE_AN_READY: + case XGBE_AN_COMPLETE: + break; + default: + if (phy_data->phy_cdr_delay < XGBE_CDR_DELAY_MAX) + phy_data->phy_cdr_delay += XGBE_CDR_DELAY_INC; + else + phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT; + break; + } + break; + default: + break; + } +} + +static void xgbe_phy_an_pre(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + switch (pdata->an_mode) { + case XGBE_AN_MODE_CL73: + case XGBE_AN_MODE_CL73_REDRV: + if (phy_data->cur_mode != XGBE_MODE_KR) + break; + + xgbe_phy_cdr_notrack(pdata); + break; + default: + break; + } +} + static void xgbe_phy_stop(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; @@ -2680,6 +2826,9 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata) xgbe_phy_sfp_reset(phy_data); xgbe_phy_sfp_mod_absent(pdata); + /* Reset CDR support */ + xgbe_phy_cdr_track(pdata); + /* Power off the PHY */ xgbe_phy_power_off(pdata); @@ -2712,6 +2861,9 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata) /* Start in highest supported mode */ xgbe_phy_set_mode(pdata, phy_data->start_mode); + /* Reset CDR support */ + xgbe_phy_cdr_track(pdata); + /* After starting the I2C controller, we can check for an SFP */ switch (phy_data->port_mode) { case XGBE_PORT_MODE_SFP: @@ -3019,6 +3171,8 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) } } + phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT; + /* Register for driving external PHYs */ mii = devm_mdiobus_alloc(pdata->dev); if (!mii) { @@ -3071,4 +3225,10 @@ void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *phy_if) phy_impl->an_advertising = xgbe_phy_an_advertising; phy_impl->an_outcome = xgbe_phy_an_outcome; + + phy_impl->an_pre = xgbe_phy_an_pre; + phy_impl->an_post = xgbe_phy_an_post; + + phy_impl->kr_training_pre = xgbe_phy_kr_training_pre; + phy_impl->kr_training_post = xgbe_phy_kr_training_post; } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index ad102c8bac7b..95d4b56448c6 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -833,6 +833,7 @@ struct xgbe_hw_if { /* This structure represents implementation specific routines for an * implementation of a PHY. All routines are required unless noted below. 
* Optional routines: + * an_pre, an_post * kr_training_pre, kr_training_post */ struct xgbe_phy_impl_if { @@ -875,6 +876,10 @@ struct xgbe_phy_impl_if { /* Process results of auto-negotiation */ enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *); + /* Pre/Post auto-negotiation support */ + void (*an_pre)(struct xgbe_prv_data *); + void (*an_post)(struct xgbe_prv_data *); + /* Pre/Post KR training enablement support */ void (*kr_training_pre)(struct xgbe_prv_data *); void (*kr_training_post)(struct xgbe_prv_data *); @@ -989,6 +994,7 @@ struct xgbe_version_data { unsigned int irq_reissue_support; unsigned int tx_desc_prefetch; unsigned int rx_desc_prefetch; + unsigned int an_cdr_workaround; }; struct xgbe_vxlan_data { @@ -1257,6 +1263,9 @@ struct xgbe_prv_data { unsigned int debugfs_xprop_reg; unsigned int debugfs_xi2c_reg; + + bool debugfs_an_cdr_workaround; + bool debugfs_an_cdr_track_early; }; /* Function prototypes*/ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 32f6d2e24d66..1a1a6380c128 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -95,6 +95,7 @@ void aq_nic_cfg_start(struct aq_nic_s *self) /*rss rings */ cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF); cfg->vecs = min(cfg->vecs, num_online_cpus()); + cfg->vecs = min(cfg->vecs, self->irqvecs); /* cfg->vecs should be power of 2 for RSS */ if (cfg->vecs >= 8U) cfg->vecs = 8U; @@ -246,6 +247,8 @@ void aq_nic_ndev_init(struct aq_nic_s *self) self->ndev->hw_features |= aq_hw_caps->hw_features; self->ndev->features = aq_hw_caps->hw_features; + self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM | + NETIF_F_RXHASH | NETIF_F_SG | NETIF_F_LRO; self->ndev->priv_flags = aq_hw_caps->hw_priv_flags; self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index 219b550d1665..faa533a0ec47 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -80,6 +80,7 @@ struct aq_nic_s { struct pci_dev *pdev; unsigned int msix_entry_mask; + u32 irqvecs; }; static inline struct device *aq_nic_get_dev(struct aq_nic_s *self) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index ecc6306f940f..a50e08bb4748 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -267,16 +267,16 @@ static int aq_pci_probe(struct pci_dev *pdev, numvecs = min(numvecs, num_online_cpus()); /*enable interrupts */ #if !AQ_CFG_FORCE_LEGACY_INT - err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs, - PCI_IRQ_MSIX); - - if (err < 0) { - err = pci_alloc_irq_vectors(self->pdev, 1, 1, - PCI_IRQ_MSI | PCI_IRQ_LEGACY); - if (err < 0) - goto err_hwinit; + numvecs = pci_alloc_irq_vectors(self->pdev, 1, numvecs, + PCI_IRQ_MSIX | PCI_IRQ_MSI | + PCI_IRQ_LEGACY); + + if (numvecs < 0) { + err = numvecs; + goto err_hwinit; } #endif + self->irqvecs = numvecs; /* net device init */ aq_nic_cfg_start(self); @@ -298,9 +298,9 @@ err_free_aq_hw: kfree(self->aq_hw); err_ioremap: free_netdev(ndev); -err_pci_func: - pci_release_regions(pdev); err_ndev: + pci_release_regions(pdev); +err_pci_func: pci_disable_device(pdev); return err; } diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 
f9a3c1a76d5d..f33b25fbca63 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -2144,14 +2144,21 @@ static const struct net_device_ops bcm_sysport_netdev_ops = { .ndo_select_queue = bcm_sysport_select_queue, }; -static int bcm_sysport_map_queues(struct net_device *dev, +static int bcm_sysport_map_queues(struct notifier_block *nb, struct dsa_notifier_register_info *info) { - struct bcm_sysport_priv *priv = netdev_priv(dev); struct bcm_sysport_tx_ring *ring; + struct bcm_sysport_priv *priv; struct net_device *slave_dev; unsigned int num_tx_queues; unsigned int q, start, port; + struct net_device *dev; + + priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier); + if (priv->netdev != info->master) + return 0; + + dev = info->master; /* We can't be setting up queue inspection for non directly attached * switches @@ -2174,11 +2181,12 @@ static int bcm_sysport_map_queues(struct net_device *dev, if (priv->is_lite) netif_set_real_num_tx_queues(slave_dev, slave_dev->num_tx_queues / 2); + num_tx_queues = slave_dev->real_num_tx_queues; if (priv->per_port_num_tx_queues && priv->per_port_num_tx_queues != num_tx_queues) - netdev_warn(slave_dev, "asymetric number of per-port queues\n"); + netdev_warn(slave_dev, "asymmetric number of per-port queues\n"); priv->per_port_num_tx_queues = num_tx_queues; @@ -2201,7 +2209,7 @@ static int bcm_sysport_map_queues(struct net_device *dev, return 0; } -static int bcm_sysport_dsa_notifier(struct notifier_block *unused, +static int bcm_sysport_dsa_notifier(struct notifier_block *nb, unsigned long event, void *ptr) { struct dsa_notifier_register_info *info; @@ -2211,7 +2219,7 @@ static int bcm_sysport_dsa_notifier(struct notifier_block *unused, info = ptr; - return notifier_from_errno(bcm_sysport_map_queues(info->master, info)); + return notifier_from_errno(bcm_sysport_map_queues(nb, info)); } #define REV_FMT "v%2x.%02x" diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 1f622ca2a64f..8ba14ae00e8f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1927,22 +1927,39 @@ static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen) return retval; } -static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen) +static void bnxt_get_pkgver(struct net_device *dev) { + struct bnxt *bp = netdev_priv(dev); u16 index = 0; - u32 datalen; + char *pkgver; + u32 pkglen; + u8 *pkgbuf; + int len; if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG, BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, - &index, NULL, &datalen) != 0) - return NULL; + &index, NULL, &pkglen) != 0) + return; - memset(buf, 0, buflen); - if (bnxt_get_nvram_item(dev, index, 0, datalen, buf) != 0) - return NULL; + pkgbuf = kzalloc(pkglen, GFP_KERNEL); + if (!pkgbuf) { + dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n", + pkglen); + return; + } + + if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf)) + goto err; - return bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, buf, - datalen); + pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf, + pkglen); + if (pkgver && *pkgver != 0 && isdigit(*pkgver)) { + len = strlen(bp->fw_ver_str); + snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1, + "/pkg %s", pkgver); + } +err: + kfree(pkgbuf); } static int bnxt_get_eeprom(struct net_device *dev, @@ -2615,22 +2632,10 @@ void 
bnxt_ethtool_init(struct bnxt *bp) struct hwrm_selftest_qlist_input req = {0}; struct bnxt_test_info *test_info; struct net_device *dev = bp->dev; - char *pkglog; int i, rc; - pkglog = kzalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL); - if (pkglog) { - char *pkgver; - int len; + bnxt_get_pkgver(dev); - pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH); - if (pkgver && *pkgver != 0 && isdigit(*pkgver)) { - len = strlen(bp->fw_ver_str); - snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1, - "/pkg %s", pkgver); - } - kfree(pkglog); - } if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp)) return; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h index 73f2249555b5..83444811d3c6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h @@ -59,8 +59,6 @@ enum bnxt_nvm_directory_type { #define BNX_DIR_ATTR_NO_CHKSUM (1 << 0) #define BNX_DIR_ATTR_PROP_STREAM (1 << 1) -#define BNX_PKG_LOG_MAX_LENGTH 4096 - enum bnxnvm_pkglog_field_index { BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0, BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1, diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 08bbb639be1a..9f59b1270a7c 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -8733,14 +8733,15 @@ static void tg3_free_consistent(struct tg3 *tp) tg3_mem_rx_release(tp); tg3_mem_tx_release(tp); - /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */ - tg3_full_lock(tp, 0); + /* tp->hw_stats can be referenced safely: + * 1. under rtnl_lock + * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set. + */ if (tp->hw_stats) { dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats), tp->hw_stats, tp->stats_mapping); tp->hw_stats = NULL; } - tg3_full_unlock(tp); } /* @@ -14178,7 +14179,7 @@ static void tg3_get_stats64(struct net_device *dev, struct tg3 *tp = netdev_priv(dev); spin_lock_bh(&tp->lock); - if (!tp->hw_stats) { + if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) { *stats = tp->net_stats_prev; spin_unlock_bh(&tp->lock); return; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 24d2865b8806..005283c7cdfe 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3433,8 +3433,8 @@ static int adap_config_hma(struct adapter *adapter) sgl = adapter->hma.sgt->sgl; node = dev_to_node(adapter->pdev_dev); for_each_sg(sgl, iter, sgt->orig_nents, i) { - newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL, - page_order); + newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL | + __GFP_ZERO, page_order); if (!newpage) { dev_err(adapter->pdev_dev, "Not enough memory for HMA page allocation\n"); @@ -5474,6 +5474,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } spin_lock_init(&adapter->mbox_lock); INIT_LIST_HEAD(&adapter->mlist.list); + adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS; pci_set_drvdata(pdev, adapter); if (func != ent->driver_data) { @@ -5508,8 +5509,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_free_adapter; } - adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS; - /* PCI device has been enabled */ adapter->flags |= DEV_ENABLED; memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c 
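/*
 * Rough userspace sketch of the tg3 locking rule stated above: readers take
 * the fine-grained lock and fall back to the last cached snapshot unless an
 * "init complete" flag confirms the hardware stats block is still valid.  A
 * pthread mutex stands in for the kernel spinlock; all names are
 * illustrative.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_stats { unsigned long rx_packets, tx_packets; };

struct demo_dev {
	pthread_mutex_t lock;
	bool init_complete;
	struct demo_stats *hw_stats;	/* NULL once the DMA block is freed */
	struct demo_stats stats_prev;	/* last good snapshot */
};

static void demo_get_stats(struct demo_dev *d, struct demo_stats *out)
{
	pthread_mutex_lock(&d->lock);
	if (!d->hw_stats || !d->init_complete) {
		*out = d->stats_prev;	/* hardware block gone or not ready */
	} else {
		*out = *d->hw_stats;
		d->stats_prev = *out;
	}
	pthread_mutex_unlock(&d->lock);
}

int main(void)
{
	struct demo_dev dev = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.init_complete = false,
		.hw_stats = NULL,
		.stats_prev = { .rx_packets = 10, .tx_packets = 20 },
	};
	struct demo_stats snap;

	demo_get_stats(&dev, &snap);	/* takes the cached-snapshot path */
	printf("rx %lu tx %lu\n", snap.rx_packets, snap.tx_packets);
	return 0;
}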
b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c index 4df282ed22c7..0beee2cc2ddd 100644 --- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c +++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c @@ -61,7 +61,7 @@ static const char hw_stat_gstrings[][ETH_GSTRING_LEN] = { static const char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = { "tx-single-collision", "tx-multiple-collision", - "tx-late-collsion", + "tx-late-collision", "tx-aborted-frames", "tx-lost-frames", "tx-carrier-sense-errors", diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index 3e62692af011..fa5b30f547f6 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -87,7 +87,7 @@ do { \ #define HNAE_AE_REGISTER 0x1 -#define RCB_RING_NAME_LEN 16 +#define RCB_RING_NAME_LEN (IFNAMSIZ + 4) #define HNAE_LOWEST_LATENCY_COAL_PARAM 30 #define HNAE_LOW_LATENCY_COAL_PARAM 80 diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index aad5658d79d5..6e8d6a6f6aaf 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -794,46 +794,61 @@ static int ibmvnic_login(struct net_device *netdev) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); unsigned long timeout = msecs_to_jiffies(30000); - struct device *dev = &adapter->vdev->dev; + int retry_count = 0; int rc; do { - if (adapter->renegotiate) { - adapter->renegotiate = false; + if (retry_count > IBMVNIC_MAX_QUEUES) { + netdev_warn(netdev, "Login attempts exceeded\n"); + return -1; + } + + adapter->init_done_rc = 0; + reinit_completion(&adapter->init_done); + rc = send_login(adapter); + if (rc) { + netdev_warn(netdev, "Unable to login\n"); + return rc; + } + + if (!wait_for_completion_timeout(&adapter->init_done, + timeout)) { + netdev_warn(netdev, "Login timed out\n"); + return -1; + } + + if (adapter->init_done_rc == PARTIALSUCCESS) { + retry_count++; release_sub_crqs(adapter, 1); + adapter->init_done_rc = 0; reinit_completion(&adapter->init_done); send_cap_queries(adapter); if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { - dev_err(dev, "Capabilities query timeout\n"); + netdev_warn(netdev, + "Capabilities query timed out\n"); return -1; } + rc = init_sub_crqs(adapter); if (rc) { - dev_err(dev, - "Initialization of SCRQ's failed\n"); + netdev_warn(netdev, + "SCRQ initialization failed\n"); return -1; } + rc = init_sub_crq_irqs(adapter); if (rc) { - dev_err(dev, - "Initialization of SCRQ's irqs failed\n"); + netdev_warn(netdev, + "SCRQ irq initialization failed\n"); return -1; } - } - - reinit_completion(&adapter->init_done); - rc = send_login(adapter); - if (rc) { - dev_err(dev, "Unable to attempt device login\n"); - return rc; - } else if (!wait_for_completion_timeout(&adapter->init_done, - timeout)) { - dev_err(dev, "Login timeout\n"); + } else if (adapter->init_done_rc) { + netdev_warn(netdev, "Adapter login failed\n"); return -1; } - } while (adapter->renegotiate); + } while (adapter->init_done_rc == PARTIALSUCCESS); /* handle pending MAC address changes after successful login */ if (adapter->mac_change_pending) { @@ -1034,16 +1049,14 @@ static int __ibmvnic_open(struct net_device *netdev) netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i); if (prev_state == VNIC_CLOSED) enable_irq(adapter->rx_scrq[i]->irq); - else - enable_scrq_irq(adapter, adapter->rx_scrq[i]); + enable_scrq_irq(adapter, adapter->rx_scrq[i]); } for (i = 0; i < adapter->req_tx_queues; i++) { netdev_dbg(netdev, "Enabling 
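/*
 * Sketch of the bounded-retry login flow introduced in the ibmvnic hunk
 * above: the driver retries while the firmware reports partial success,
 * renegotiating resources between attempts, and gives up after a fixed number
 * of tries instead of looping forever.  Return codes and helpers below are
 * hypothetical stand-ins.
 */
#include <stdio.h>

enum login_rc { LOGIN_OK = 0, LOGIN_PARTIAL = 1, LOGIN_FAIL = -1 };
#define MAX_LOGIN_RETRIES 8

/* pretend firmware: reports partial success twice, then succeeds */
static enum login_rc send_login_and_wait(int attempt)
{
	return attempt < 2 ? LOGIN_PARTIAL : LOGIN_OK;
}

static int demo_login(void)
{
	int retries = 0;
	enum login_rc rc;

	do {
		if (retries > MAX_LOGIN_RETRIES) {
			fprintf(stderr, "login attempts exceeded\n");
			return -1;
		}
		rc = send_login_and_wait(retries);
		if (rc == LOGIN_PARTIAL) {
			retries++;
			/* renegotiate queues/capabilities here before retrying */
		} else if (rc == LOGIN_FAIL) {
			return -1;
		}
	} while (rc == LOGIN_PARTIAL);

	return 0;
}

int main(void)
{
	printf("login %s\n", demo_login() ? "failed" : "succeeded");
	return 0;
}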
tx_scrq[%d] irq\n", i); if (prev_state == VNIC_CLOSED) enable_irq(adapter->tx_scrq[i]->irq); - else - enable_scrq_irq(adapter, adapter->tx_scrq[i]); + enable_scrq_irq(adapter, adapter->tx_scrq[i]); } rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP); @@ -1115,7 +1128,7 @@ static void clean_rx_pools(struct ibmvnic_adapter *adapter) if (!adapter->rx_pool) return; - rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); + rx_scrqs = adapter->num_active_rx_pools; rx_entries = adapter->req_rx_add_entries_per_subcrq; /* Free any remaining skbs in the rx buffer pools */ @@ -1164,7 +1177,7 @@ static void clean_tx_pools(struct ibmvnic_adapter *adapter) if (!adapter->tx_pool || !adapter->tso_pool) return; - tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); + tx_scrqs = adapter->num_active_tx_pools; /* Free any remaining skbs in the tx buffer pools */ for (i = 0; i < tx_scrqs; i++) { @@ -1184,6 +1197,7 @@ static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter) if (adapter->tx_scrq[i]->irq) { netdev_dbg(netdev, "Disabling tx_scrq[%d] irq\n", i); + disable_scrq_irq(adapter, adapter->tx_scrq[i]); disable_irq(adapter->tx_scrq[i]->irq); } } @@ -1193,6 +1207,7 @@ static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter) if (adapter->rx_scrq[i]->irq) { netdev_dbg(netdev, "Disabling rx_scrq[%d] irq\n", i); + disable_scrq_irq(adapter, adapter->rx_scrq[i]); disable_irq(adapter->rx_scrq[i]->irq); } } @@ -1828,7 +1843,8 @@ static int do_reset(struct ibmvnic_adapter *adapter, for (i = 0; i < adapter->req_rx_queues; i++) napi_schedule(&adapter->napi[i]); - if (adapter->reset_reason != VNIC_RESET_FAILOVER) + if (adapter->reset_reason != VNIC_RESET_FAILOVER && + adapter->reset_reason != VNIC_RESET_CHANGE_PARAM) netdev_notify_peers(netdev); netif_carrier_on(netdev); @@ -2601,12 +2617,19 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter, { struct device *dev = &adapter->vdev->dev; unsigned long rc; + u64 val; if (scrq->hw_irq > 0x100000000ULL) { dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); return 1; } + val = (0xff000000) | scrq->hw_irq; + rc = plpar_hcall_norets(H_EOI, val); + if (rc) + dev_err(dev, "H_EOI FAILED irq 0x%llx. 
rc=%ld\n", + val, rc); + rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); if (rc) @@ -3170,7 +3193,7 @@ static int send_version_xchg(struct ibmvnic_adapter *adapter) struct vnic_login_client_data { u8 type; __be16 len; - char name; + char name[]; } __packed; static int vnic_client_data_len(struct ibmvnic_adapter *adapter) @@ -3199,21 +3222,21 @@ static void vnic_add_client_data(struct ibmvnic_adapter *adapter, vlcd->type = 1; len = strlen(os_name) + 1; vlcd->len = cpu_to_be16(len); - strncpy(&vlcd->name, os_name, len); - vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len); + strncpy(vlcd->name, os_name, len); + vlcd = (struct vnic_login_client_data *)(vlcd->name + len); /* Type 2 - LPAR name */ vlcd->type = 2; len = strlen(utsname()->nodename) + 1; vlcd->len = cpu_to_be16(len); - strncpy(&vlcd->name, utsname()->nodename, len); - vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len); + strncpy(vlcd->name, utsname()->nodename, len); + vlcd = (struct vnic_login_client_data *)(vlcd->name + len); /* Type 3 - device name */ vlcd->type = 3; len = strlen(adapter->netdev->name) + 1; vlcd->len = cpu_to_be16(len); - strncpy(&vlcd->name, adapter->netdev->name, len); + strncpy(vlcd->name, adapter->netdev->name, len); } static int send_login(struct ibmvnic_adapter *adapter) @@ -3942,7 +3965,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, * to resend the login buffer with fewer queues requested. */ if (login_rsp_crq->generic.rc.code) { - adapter->renegotiate = true; + adapter->init_done_rc = login_rsp_crq->generic.rc.code; complete(&adapter->init_done); return 0; } diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 99c0b58c2c39..22391e8805f6 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -1035,7 +1035,6 @@ struct ibmvnic_adapter { struct ibmvnic_sub_crq_queue **tx_scrq; struct ibmvnic_sub_crq_queue **rx_scrq; - bool renegotiate; /* rx structs */ struct napi_struct *napi; diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 5b13ca1bd85f..7dc5f045e969 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -586,7 +586,7 @@ struct ice_sw_rule_lg_act { #define ICE_LG_ACT_MIRROR_VSI_ID_S 3 #define ICE_LG_ACT_MIRROR_VSI_ID_M (0x3FF << ICE_LG_ACT_MIRROR_VSI_ID_S) - /* Action type = 5 - Large Action */ + /* Action type = 5 - Generic Value */ #define ICE_LG_ACT_GENERIC 0x5 #define ICE_LG_ACT_GENERIC_VALUE_S 3 #define ICE_LG_ACT_GENERIC_VALUE_M (0xFFFF << ICE_LG_ACT_GENERIC_VALUE_S) diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 21977ec984c4..71d032cc5fa7 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -78,6 +78,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, struct ice_aq_desc desc; enum ice_status status; u16 flags; + u8 i; cmd = &desc.params.mac_read; @@ -98,8 +99,16 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, return ICE_ERR_CFG; } - ether_addr_copy(hw->port_info->mac.lan_addr, resp->mac_addr); - ether_addr_copy(hw->port_info->mac.perm_addr, resp->mac_addr); + /* A single port can report up to two (LAN and WoL) addresses */ + for (i = 0; i < cmd->num_addr; i++) + if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) { + 
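/*
 * Sketch of the flexible-array TLV packing fixed above ("char name[]" instead
 * of "char name"): each record is a small header followed by a
 * variable-length string, and the next record starts right after the previous
 * payload.  The layout is deliberately simplified (one-byte type and length,
 * no endianness or packing attributes) for illustration only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct tlv {
	unsigned char type;
	unsigned char len;	/* length of name[], including the NUL */
	char name[];		/* flexible array member, no '&' needed */
};

static struct tlv *tlv_append(struct tlv *t, unsigned char type, const char *s)
{
	size_t len = strlen(s) + 1;

	t->type = type;
	t->len = (unsigned char)len;
	memcpy(t->name, s, len);
	/* next record begins immediately after this record's payload */
	return (struct tlv *)(t->name + len);
}

int main(void)
{
	unsigned char *buf = malloc(128);
	struct tlv *t = (struct tlv *)buf;

	if (!buf)
		return 1;
	t = tlv_append(t, 1, "Linux");	/* OS name */
	t = tlv_append(t, 2, "lpar-1");	/* partition name */
	tlv_append(t, 3, "eth0");	/* device name */

	printf("first record: type %d, name %s\n",
	       ((struct tlv *)buf)->type, ((struct tlv *)buf)->name);
	free(buf);
	return 0;
}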
ether_addr_copy(hw->port_info->mac.lan_addr, + resp[i].mac_addr); + ether_addr_copy(hw->port_info->mac.perm_addr, + resp[i].mac_addr); + break; + } + return 0; } @@ -464,9 +473,12 @@ enum ice_status ice_init_hw(struct ice_hw *hw) if (status) goto err_unroll_sched; - /* Get port MAC information */ - mac_buf_len = sizeof(struct ice_aqc_manage_mac_read_resp); - mac_buf = devm_kzalloc(ice_hw_to_dev(hw), mac_buf_len, GFP_KERNEL); + /* Get MAC information */ + /* A single port can report up to two (LAN and WoL) addresses */ + mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2, + sizeof(struct ice_aqc_manage_mac_read_resp), + GFP_KERNEL); + mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp); if (!mac_buf) { status = ICE_ERR_NO_MEMORY; diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index 5909a4407e38..7c511f144ed6 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -1014,10 +1014,10 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, desc = ICE_CTL_Q_DESC(cq->rq, ntc); desc_idx = ntc; + cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval); flags = le16_to_cpu(desc->flags); if (flags & ICE_AQ_FLAG_ERR) { ret_code = ICE_ERR_AQ_ERROR; - cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval); ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event received with error 0x%x\n", cq->rq_last_status); diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 1b9e2ef48a9d..499904874b3f 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -121,8 +121,6 @@ #define PFINT_FW_CTL_CAUSE_ENA_S 30 #define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S) #define PFINT_OICR 0x0016CA00 -#define PFINT_OICR_INTEVENT_S 0 -#define PFINT_OICR_INTEVENT_M BIT(PFINT_OICR_INTEVENT_S) #define PFINT_OICR_HLP_RDY_S 14 #define PFINT_OICR_HLP_RDY_M BIT(PFINT_OICR_HLP_RDY_S) #define PFINT_OICR_CPM_RDY_S 15 diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 210b7910f1cd..5299caf55a7f 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -1722,9 +1722,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) oicr = rd32(hw, PFINT_OICR); ena_mask = rd32(hw, PFINT_OICR_ENA); - if (!(oicr & PFINT_OICR_INTEVENT_M)) - goto ena_intr; - if (oicr & PFINT_OICR_GRST_M) { u32 reset; /* we have a reset warning */ @@ -1782,7 +1779,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) } ret = IRQ_HANDLED; -ena_intr: /* re-enable interrupt causes that are not handled during this pass */ wr32(hw, PFINT_OICR_ENA, ena_mask); if (!test_bit(__ICE_DOWN, pf->state)) { diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index f16ff3e4a840..2e6c1d92cc88 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -751,14 +751,14 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi, u16 num_added = 0; u32 temp; + *num_nodes_added = 0; + if (!num_nodes) return status; if (!parent || layer < hw->sw_entry_point_layer) return ICE_ERR_PARAM; - *num_nodes_added = 0; - /* max children per node per layer */ max_child_nodes = le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children); diff --git 
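/*
 * Tiny sketch of the output-parameter fix in the ice_sched hunk above:
 * callers may read *num_added even when the function bails out early, so the
 * value must be zeroed before any return, not after the parameter checks.
 * The function is a hypothetical reduction of that shape.
 */
#include <stdio.h>

static int add_nodes(int parent_ok, unsigned int num_nodes,
		     unsigned int *num_added)
{
	*num_added = 0;		/* defined on every path, including errors below */

	if (!num_nodes)
		return 0;
	if (!parent_ok)
		return -1;

	*num_added = num_nodes;	/* pretend all requested nodes were created */
	return 0;
}

int main(void)
{
	unsigned int added = 1234;	/* stale value from a previous call */

	add_nodes(0, 5, &added);	/* error path */
	printf("added = %u\n", added);	/* prints 0, not the stale 1234 */
	return 0;
}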
a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index c1c0bc30a16d..cce7ada89255 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1700,7 +1700,22 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue, WARN_ON(hw->mac.type != e1000_i210); WARN_ON(queue < 0 || queue > 1); - if (enable) { + if (enable || queue == 0) { + /* i210 does not allow the queue 0 to be in the Strict + * Priority mode while the Qav mode is enabled, so, + * instead of disabling strict priority mode, we give + * queue 0 the maximum of credits possible. + * + * See section 8.12.19 of the i210 datasheet, "Note: + * Queue0 QueueMode must be set to 1b when + * TransmitMode is set to Qav." + */ + if (queue == 0 && !enable) { + /* max "linkspeed" idleslope in kbps */ + idleslope = 1000000; + hicredit = ETH_FRAME_LEN; + } + set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH); set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index 68af127987bc..cead23e3db0c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -943,8 +943,8 @@ err2: kfree(ipsec->ip_tbl); kfree(ipsec->rx_tbl); kfree(ipsec->tx_tbl); + kfree(ipsec); err1: - kfree(adapter->ipsec); netdev_err(adapter->netdev, "Unable to allocate memory for SA tables"); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 3123267dfba9..9592f3e3e42e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -3427,6 +3427,9 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) hw->phy.sfp_setup_needed = false; } + if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) + return status; + /* Reset PHY */ if (!hw->phy.reset_disable && hw->phy.ops.reset) hw->phy.ops.reset(hw); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 3d9033f26eff..850f8af95e49 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -3420,7 +3420,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) if (!err) continue; hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j); - break; + goto err_setup_tx; } return 0; @@ -4137,7 +4137,7 @@ out_drop: return NETDEV_TX_OK; } -static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbevf_ring *tx_ring; diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 54a038943c06..6f410235987c 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -663,7 +663,7 @@ enum mvpp2_tag_type { #define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31) #define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \ MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1) -#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_VID_FILT_RANGE_START - 1) +#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_MAC_RANGE_START - 1) #define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30) #define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29) #define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 
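/*
 * Sketch of the i210 queue-0 behaviour described in the igb hunk above: while
 * Qav mode is active, queue 0 may not run in strict-priority mode, so
 * "disabling" CBS on queue 0 is emulated by programming the maximum credits
 * (link-speed idleslope, one full frame of hicredit).  The setter functions
 * and values below are illustrative stubs, not the driver's register writes.
 */
#include <stdbool.h>
#include <stdio.h>

#define ETH_FRAME_LEN 1514

static void set_idleslope_kbps(int queue, int kbps)
{
	printf("queue %d: idleslope %d kbps\n", queue, kbps);
}

static void set_hicredit(int queue, int bytes)
{
	printf("queue %d: hicredit %d bytes\n", queue, bytes);
}

static void configure_cbs(int queue, bool enable, int idleslope, int hicredit)
{
	if (!enable && queue == 0) {
		/* queue 0 cannot leave stream reservation mode while Qav is
		 * on, so give it effectively the whole link instead
		 */
		idleslope = 1000000;	/* max "linkspeed" idleslope in kbps */
		hicredit = ETH_FRAME_LEN;
	}
	set_idleslope_kbps(queue, idleslope);
	set_hicredit(queue, hicredit);
}

int main(void)
{
	configure_cbs(0, false, 0, 0);		/* "disable" -> max credits */
	configure_cbs(1, true, 20000, 300);	/* normal CBS reservation */
	return 0;
}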
28) @@ -916,6 +916,8 @@ static struct { #define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ) +#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40) + /* Definitions */ /* Shared Packet Processor resources */ @@ -940,6 +942,7 @@ struct mvpp2 { struct clk *pp_clk; struct clk *gop_clk; struct clk *mg_clk; + struct clk *mg_core_clk; struct clk *axi_clk; /* List of pointers to port structures */ @@ -1429,7 +1432,7 @@ static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port, if (port->priv->hw_version == MVPP21) return tx_desc->pp21.buf_dma_addr; else - return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0); + return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK; } static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, @@ -1447,7 +1450,7 @@ static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, } else { u64 val = (u64)addr; - tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0); + tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK; tx_desc->pp22.buf_dma_addr_ptp |= val; tx_desc->pp22.packet_offset = offset; } @@ -1507,7 +1510,7 @@ static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port, if (port->priv->hw_version == MVPP21) return rx_desc->pp21.buf_dma_addr; else - return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0); + return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK; } static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, @@ -1516,7 +1519,7 @@ static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, if (port->priv->hw_version == MVPP21) return rx_desc->pp21.buf_cookie; else - return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0); + return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK; } static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port, @@ -8766,18 +8769,27 @@ static int mvpp2_probe(struct platform_device *pdev) err = clk_prepare_enable(priv->mg_clk); if (err < 0) goto err_gop_clk; + + priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk"); + if (IS_ERR(priv->mg_core_clk)) { + priv->mg_core_clk = NULL; + } else { + err = clk_prepare_enable(priv->mg_core_clk); + if (err < 0) + goto err_mg_clk; + } } priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk"); if (IS_ERR(priv->axi_clk)) { err = PTR_ERR(priv->axi_clk); if (err == -EPROBE_DEFER) - goto err_gop_clk; + goto err_mg_core_clk; priv->axi_clk = NULL; } else { err = clk_prepare_enable(priv->axi_clk); if (err < 0) - goto err_gop_clk; + goto err_mg_core_clk; } /* Get system's tclk rate */ @@ -8789,9 +8801,9 @@ static int mvpp2_probe(struct platform_device *pdev) } if (priv->hw_version == MVPP22) { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)); + err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK); if (err) - goto err_mg_clk; + goto err_axi_clk; /* Sadly, the BM pools all share the same register to * store the high 32 bits of their address. 
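/*
 * Sketch of the descriptor masking shown in the mvpp2 hunk above: the DMA
 * address and extra metadata share one 64-bit descriptor word, and the
 * address is extracted with a single named 40-bit mask instead of an
 * open-coded GENMASK.  The bit layout below is illustrative only.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DESC_DMA_MASK ((UINT64_C(1) << 40) - 1)	/* like DMA_BIT_MASK(40) */

static uint64_t desc_get_dma_addr(uint64_t packed)
{
	return packed & DESC_DMA_MASK;
}

static uint64_t desc_set_dma_addr(uint64_t packed, uint64_t addr)
{
	packed &= ~DESC_DMA_MASK;		/* preserve the metadata bits */
	return packed | (addr & DESC_DMA_MASK);
}

int main(void)
{
	uint64_t desc = desc_set_dma_addr(UINT64_C(0xAB) << 56,	/* fake flags */
					  UINT64_C(0x12345678AB));

	printf("addr = 0x%" PRIx64 "\n", desc_get_dma_addr(desc));
	return 0;
}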
So they * must all have the same high 32 bits, which forces @@ -8799,14 +8811,14 @@ static int mvpp2_probe(struct platform_device *pdev) */ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) - goto err_mg_clk; + goto err_axi_clk; } /* Initialize network controller */ err = mvpp2_init(pdev, priv); if (err < 0) { dev_err(&pdev->dev, "failed to initialize controller\n"); - goto err_mg_clk; + goto err_axi_clk; } /* Initialize ports */ @@ -8819,7 +8831,7 @@ static int mvpp2_probe(struct platform_device *pdev) if (priv->port_count == 0) { dev_err(&pdev->dev, "no ports enabled\n"); err = -ENODEV; - goto err_mg_clk; + goto err_axi_clk; } /* Statistics must be gathered regularly because some of them (like @@ -8847,8 +8859,13 @@ err_port_probe: mvpp2_port_remove(priv->port_list[i]); i++; } -err_mg_clk: +err_axi_clk: clk_disable_unprepare(priv->axi_clk); + +err_mg_core_clk: + if (priv->hw_version == MVPP22) + clk_disable_unprepare(priv->mg_core_clk); +err_mg_clk: if (priv->hw_version == MVPP22) clk_disable_unprepare(priv->mg_clk); err_gop_clk: @@ -8895,6 +8912,7 @@ static int mvpp2_remove(struct platform_device *pdev) return 0; clk_disable_unprepare(priv->axi_clk); + clk_disable_unprepare(priv->mg_core_clk); clk_disable_unprepare(priv->mg_clk); clk_disable_unprepare(priv->pp_clk); clk_disable_unprepare(priv->gop_clk); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index a30a2e95d13f..f11b45001cad 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -1027,6 +1027,22 @@ static int mlx4_en_set_coalesce(struct net_device *dev, if (!coal->tx_max_coalesced_frames_irq) return -EINVAL; + if (coal->tx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME || + coal->rx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME || + coal->rx_coalesce_usecs_low > MLX4_EN_MAX_COAL_TIME || + coal->rx_coalesce_usecs_high > MLX4_EN_MAX_COAL_TIME) { + netdev_info(dev, "%s: maximum coalesce time supported is %d usecs\n", + __func__, MLX4_EN_MAX_COAL_TIME); + return -ERANGE; + } + + if (coal->tx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS || + coal->rx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS) { + netdev_info(dev, "%s: maximum coalesced frames supported is %d\n", + __func__, MLX4_EN_MAX_COAL_PKTS); + return -ERANGE; + } + priv->rx_frames = (coal->rx_max_coalesced_frames == MLX4_EN_AUTO_CONF) ? 
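/*
 * Sketch of the reverse-order unwind used in the mvpp2 probe error paths
 * above: each successfully enabled clock gets its own error label, and a
 * later failure jumps to the label that releases everything acquired so far,
 * in the opposite order of acquisition.  The enable/disable helpers are
 * stand-ins for clk_prepare_enable()/clk_disable_unprepare().
 */
#include <stdio.h>

static int enable(const char *name, int fail)
{
	if (fail) {
		fprintf(stderr, "enabling %s failed\n", name);
		return -1;
	}
	printf("enabled %s\n", name);
	return 0;
}

static void disable(const char *name)
{
	printf("disabled %s\n", name);
}

static int probe(int fail_axi)
{
	int err;

	err = enable("mg_clk", 0);
	if (err)
		return err;
	err = enable("mg_core_clk", 0);
	if (err)
		goto err_mg_clk;
	err = enable("axi_clk", fail_axi);
	if (err)
		goto err_mg_core_clk;

	return 0;

err_mg_core_clk:
	disable("mg_core_clk");
err_mg_clk:
	disable("mg_clk");
	return err;
}

int main(void)
{
	return probe(1) ? 1 : 0;	/* exercise the unwind path */
}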
MLX4_EN_RX_COAL_TARGET : diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index e0adac4a9a19..9670b33fc9b1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -3324,12 +3324,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, MAX_TX_RINGS, GFP_KERNEL); if (!priv->tx_ring[t]) { err = -ENOMEM; - goto err_free_tx; + goto out; } priv->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS, GFP_KERNEL); if (!priv->tx_cq[t]) { - kfree(priv->tx_ring[t]); err = -ENOMEM; goto out; } @@ -3582,11 +3581,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, return 0; -err_free_tx: - while (t--) { - kfree(priv->tx_ring[t]); - kfree(priv->tx_cq[t]); - } out: mlx4_en_destroy_netdev(dev); return err; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index bfef69235d71..211578ffc70d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1317,7 +1317,7 @@ static int mlx4_mf_unbond(struct mlx4_dev *dev) ret = mlx4_unbond_fs_rules(dev); if (ret) - mlx4_warn(dev, "multifunction unbond for flow rules failedi (%d)\n", ret); + mlx4_warn(dev, "multifunction unbond for flow rules failed (%d)\n", ret); ret1 = mlx4_unbond_mac_table(dev); if (ret1) { mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index f7c81133594f..ace6545f82e6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -132,6 +132,9 @@ #define MLX4_EN_TX_COAL_PKTS 16 #define MLX4_EN_TX_COAL_TIME 0x10 +#define MLX4_EN_MAX_COAL_PKTS U16_MAX +#define MLX4_EN_MAX_COAL_TIME U16_MAX + #define MLX4_EN_RX_RATE_LOW 400000 #define MLX4_EN_RX_COAL_TIME_LOW 0 #define MLX4_EN_RX_RATE_HIGH 450000 @@ -552,8 +555,8 @@ struct mlx4_en_priv { u16 rx_usecs_low; u32 pkt_rate_high; u16 rx_usecs_high; - u16 sample_interval; - u16 adaptive_rx_coal; + u32 sample_interval; + u32 adaptive_rx_coal; u32 msg_enable; u32 loopback_ok; u32 validate_loopback; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 3d46ef48d5b8..c641d5656b2d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -1007,12 +1007,14 @@ static void mlx5e_trust_update_sq_inline_mode(struct mlx5e_priv *priv) mutex_lock(&priv->state_lock); - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) - goto out; - new_channels.params = priv->channels.params; mlx5e_trust_update_tx_min_inline_mode(priv, &new_channels.params); + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { + priv->channels.params = new_channels.params; + goto out; + } + /* Skip if tx_min_inline is the same */ if (new_channels.params.tx_min_inline_mode == priv->channels.params.tx_min_inline_mode) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index d8f68e4d1018..876c3e4c6193 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -877,13 +877,14 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = { }; static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, - struct mlx5e_params *params) + struct mlx5e_params *params, u16 mtu) { u8 
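/*
 * Sketch of the range validation added in the mlx4 coalescing hunk above:
 * user-supplied microsecond and frame-count values wider than the 16-bit
 * fields the hardware accepts are rejected with -ERANGE instead of being
 * silently truncated.  The struct below is a reduced stand-in for
 * struct ethtool_coalesce.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_COAL_TIME UINT16_MAX	/* like MLX4_EN_MAX_COAL_TIME */
#define MAX_COAL_PKTS UINT16_MAX	/* like MLX4_EN_MAX_COAL_PKTS */

struct coalesce_req {
	uint32_t rx_usecs, tx_usecs;
	uint32_t rx_frames, tx_frames;
};

static int check_coalesce(const struct coalesce_req *c)
{
	if (c->rx_usecs > MAX_COAL_TIME || c->tx_usecs > MAX_COAL_TIME)
		return -ERANGE;
	if (c->rx_frames > MAX_COAL_PKTS || c->tx_frames > MAX_COAL_PKTS)
		return -ERANGE;
	return 0;
}

int main(void)
{
	struct coalesce_req bad = { .rx_usecs = 70000 };	/* > 65535 */

	printf("check: %d\n", check_coalesce(&bad));	/* negative ERANGE */
	return 0;
}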
cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE; params->hard_mtu = MLX5E_ETH_HARD_MTU; + params->sw_mtu = mtu; params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE; params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST; params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE; @@ -931,7 +932,7 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev, priv->channels.params.num_channels = profile->max_nch(mdev); - mlx5e_build_rep_params(mdev, &priv->channels.params); + mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu); mlx5e_build_rep_netdev(netdev); mlx5e_timestamp_init(priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 707976482c09..027f54ac1ca2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -290,7 +290,7 @@ static int mlx5e_test_loopback(struct mlx5e_priv *priv) if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { netdev_err(priv->netdev, - "\tCan't perform loobpack test while device is down\n"); + "\tCan't perform loopback test while device is down\n"); return -ENODEV; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 4197001f9801..b94276db3ce9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1261,6 +1261,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, f->mask); addr_type = key->addr_type; + /* the HW doesn't support frag first/later */ + if (mask->flags & FLOW_DIS_FIRST_FRAG) + return -EOPNOTSUPP; + if (mask->flags & FLOW_DIS_IS_FRAGMENT) { MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, @@ -1864,7 +1868,8 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, } ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol); - if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) { + if (modify_ip_header && ip_proto != IPPROTO_TCP && + ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) { pr_info("can't offload re-write of ip proto %d\n", ip_proto); return false; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 20297108528a..5532aa3675c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -255,7 +255,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb, dma_addr = dma_map_single(sq->pdev, skb_data, headlen, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(sq->pdev, dma_addr))) - return -ENOMEM; + goto dma_unmap_wqe_err; dseg->addr = cpu_to_be64(dma_addr); dseg->lkey = sq->mkey_be; @@ -273,7 +273,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb, dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(sq->pdev, dma_addr))) - return -ENOMEM; + goto dma_unmap_wqe_err; dseg->addr = cpu_to_be64(dma_addr); dseg->lkey = sq->mkey_be; @@ -285,6 +285,10 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb, } return num_dma; + +dma_unmap_wqe_err: + mlx5e_dma_unmap_wqe_err(sq, num_dma); + return -ENOMEM; } static inline void @@ -380,17 +384,15 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, num_dma = mlx5e_txwqe_build_dsegs(sq, 
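/*
 * Sketch of the partial-unmap fix in the mlx5 TX hunk above: if mapping
 * segment N fails, the N segments already mapped must be released before
 * returning, otherwise they leak.  map_one()/unmap_one() are stubs standing
 * in for the DMA mapping API.
 */
#include <errno.h>
#include <stdio.h>

static int map_one(int idx, int fail_at)
{
	return idx == fail_at ? -1 : 0;	/* pretend mapping this segment failed */
}

static void unmap_one(int idx)
{
	printf("unmapped segment %d\n", idx);
}

static int map_segments(int nsegs, int fail_at)
{
	int num_mapped = 0;
	int i;

	for (i = 0; i < nsegs; i++) {
		if (map_one(i, fail_at))
			goto err_unmap;
		num_mapped++;
	}
	return num_mapped;

err_unmap:
	while (num_mapped--)
		unmap_one(num_mapped);	/* release in reverse order */
	return -ENOMEM;
}

int main(void)
{
	printf("result: %d\n", map_segments(4, 2));	/* unmaps 1 then 0 */
	return 0;
}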
skb, skb_data, headlen, (struct mlx5_wqe_data_seg *)cseg + ds_cnt); if (unlikely(num_dma < 0)) - goto dma_unmap_wqe_err; + goto err_drop; mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma, num_bytes, num_dma, wi, cseg); return NETDEV_TX_OK; -dma_unmap_wqe_err: +err_drop: sq->stats.dropped++; - mlx5e_dma_unmap_wqe_err(sq, wi->num_dma); - dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@ -645,17 +647,15 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, (struct mlx5_wqe_data_seg *)cseg + ds_cnt); if (unlikely(num_dma < 0)) - goto dma_unmap_wqe_err; + goto err_drop; mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma, num_bytes, num_dma, wi, cseg); return NETDEV_TX_OK; -dma_unmap_wqe_err: +err_drop: sq->stats.dropped++; - mlx5e_dma_unmap_wqe_err(sq, wi->num_dma); - dev_kfree_skb_any(skb); return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index c1c94974e16b..1814f803bd2c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -34,6 +34,9 @@ #include <linux/module.h> #include <linux/mlx5/driver.h> #include <linux/mlx5/cmd.h> +#ifdef CONFIG_RFS_ACCEL +#include <linux/cpu_rmap.h> +#endif #include "mlx5_core.h" #include "fpga/core.h" #include "eswitch.h" @@ -923,3 +926,28 @@ int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, MLX5_SET(query_eq_in, in, eq_number, eq->eqn); return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); } + +/* This function should only be called after mlx5_cmd_force_teardown_hca */ +void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + struct mlx5_eq *eq; + +#ifdef CONFIG_RFS_ACCEL + if (dev->rmap) { + free_irq_cpu_rmap(dev->rmap); + dev->rmap = NULL; + } +#endif + list_for_each_entry(eq, &table->comp_eqs_list, list) + free_irq(eq->irqn, eq); + + free_irq(table->pages_eq.irqn, &table->pages_eq); + free_irq(table->async_eq.irqn, &table->async_eq); + free_irq(table->cmd_eq.irqn, &table->cmd_eq); +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + if (MLX5_CAP_GEN(dev, pg)) + free_irq(table->pfault_eq.irqn, &table->pfault_eq); +#endif + pci_free_irq_vectors(dev->pdev); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 332bc56306bf..1352d13eedb3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -2175,26 +2175,35 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, memset(vf_stats, 0, sizeof(*vf_stats)); vf_stats->rx_packets = MLX5_GET_CTR(out, received_eth_unicast.packets) + + MLX5_GET_CTR(out, received_ib_unicast.packets) + MLX5_GET_CTR(out, received_eth_multicast.packets) + + MLX5_GET_CTR(out, received_ib_multicast.packets) + MLX5_GET_CTR(out, received_eth_broadcast.packets); vf_stats->rx_bytes = MLX5_GET_CTR(out, received_eth_unicast.octets) + + MLX5_GET_CTR(out, received_ib_unicast.octets) + MLX5_GET_CTR(out, received_eth_multicast.octets) + + MLX5_GET_CTR(out, received_ib_multicast.octets) + MLX5_GET_CTR(out, received_eth_broadcast.octets); vf_stats->tx_packets = MLX5_GET_CTR(out, transmitted_eth_unicast.packets) + + MLX5_GET_CTR(out, transmitted_ib_unicast.packets) + MLX5_GET_CTR(out, transmitted_eth_multicast.packets) + + MLX5_GET_CTR(out, transmitted_ib_multicast.packets) + MLX5_GET_CTR(out, transmitted_eth_broadcast.packets); 
vf_stats->tx_bytes = MLX5_GET_CTR(out, transmitted_eth_unicast.octets) + + MLX5_GET_CTR(out, transmitted_ib_unicast.octets) + MLX5_GET_CTR(out, transmitted_eth_multicast.octets) + + MLX5_GET_CTR(out, transmitted_ib_multicast.octets) + MLX5_GET_CTR(out, transmitted_eth_broadcast.octets); vf_stats->multicast = - MLX5_GET_CTR(out, received_eth_multicast.packets); + MLX5_GET_CTR(out, received_eth_multicast.packets) + + MLX5_GET_CTR(out, received_ib_multicast.packets); vf_stats->broadcast = MLX5_GET_CTR(out, received_eth_broadcast.packets); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index de51e7c39bc8..c39c1692e674 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -187,6 +187,7 @@ static void del_sw_ns(struct fs_node *node); static void del_sw_hw_rule(struct fs_node *node); static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1, struct mlx5_flow_destination *d2); +static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns); static struct mlx5_flow_rule * find_flow_rule(struct fs_fte *fte, struct mlx5_flow_destination *dest); @@ -481,7 +482,8 @@ static void del_sw_hw_rule(struct fs_node *node) if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER && --fte->dests_size) { - modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION); + modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) | + BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS); fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT; update_fte = true; goto out; @@ -2351,23 +2353,27 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering) static int init_root_ns(struct mlx5_flow_steering *steering) { + int err; + steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX); if (!steering->root_ns) - goto cleanup; + return -ENOMEM; - if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node)) - goto cleanup; + err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node); + if (err) + goto out_err; set_prio_attrs(steering->root_ns); - - if (create_anchor_flow_table(steering)) - goto cleanup; + err = create_anchor_flow_table(steering); + if (err) + goto out_err; return 0; -cleanup: - mlx5_cleanup_fs(steering->dev); - return -ENOMEM; +out_err: + cleanup_root_ns(steering->root_ns); + steering->root_ns = NULL; + return err; } static void clean_tree(struct fs_node *node) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 63a8ea31601c..e2c465b0b3f8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1587,6 +1587,14 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev) mlx5_enter_error_state(dev, true); + /* Some platforms requiring freeing the IRQ's in the shutdown + * flow. If they aren't freed they can't be allocated after + * kexec. There is no need to cleanup the mlx5_core software + * contexts. 
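/*
 * Sketch of the error handling reworked in init_root_ns() above: the function
 * now propagates the real error code and tears down only the state it created
 * itself, rather than invoking the global cleanup path and returning a
 * hard-coded -ENOMEM.  The helpers below are hypothetical.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct root_ns { int placeholder; };

static struct root_ns *create_root(void)
{
	return calloc(1, sizeof(struct root_ns));
}

static void cleanup_root(struct root_ns *ns)
{
	free(ns);
}

static int init_tree(struct root_ns *ns, int fail)
{
	(void)ns;
	return fail ? -EINVAL : 0;	/* pretend tree construction failed */
}

static int init_root(struct root_ns **out, int fail_tree)
{
	struct root_ns *ns = create_root();
	int err;

	if (!ns)
		return -ENOMEM;

	err = init_tree(ns, fail_tree);
	if (err)
		goto out_err;

	*out = ns;
	return 0;

out_err:
	cleanup_root(ns);	/* undo only what this function built */
	*out = NULL;
	return err;		/* propagate the real reason for the failure */
}

int main(void)
{
	struct root_ns *ns;

	printf("init_root: %d\n", init_root(&ns, 1));	/* negative EINVAL */
	return 0;
}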
+ */ + mlx5_irq_clear_affinity_hints(dev); + mlx5_core_eq_free_irqs(dev); + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 7d001fe6e631..023882d9a22e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -128,6 +128,8 @@ int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u32 *out, int outlen); int mlx5_start_eqs(struct mlx5_core_dev *dev); void mlx5_stop_eqs(struct mlx5_core_dev *dev); +/* This function should only be called after mlx5_cmd_force_teardown_hca */ +void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev); struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn); u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq); void mlx5_cq_tasklet_cb(unsigned long data); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 93ea56620a24..e13ac3b8dff7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -1100,11 +1100,11 @@ err_emad_init: err_alloc_lag_mapping: mlxsw_ports_fini(mlxsw_core); err_ports_init: - mlxsw_bus->fini(bus_priv); -err_bus_init: if (!reload) devlink_resources_unregister(devlink, NULL); err_register_resources: + mlxsw_bus->fini(bus_priv); +err_bus_init: if (!reload) devlink_free(devlink); err_devlink_alloc: diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index c11c9a635866..4ed01182a82c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1718,13 +1718,11 @@ __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *dev = mlxsw_sp_port->dev; int err; - if (bridge_port->bridge_device->multicast_enabled) { - if (bridge_port->bridge_device->multicast_enabled) { - err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, - false); - if (err) - netdev_err(dev, "Unable to remove port from SMID\n"); - } + if (bridge_port->bridge_device->multicast_enabled && + !bridge_port->mrouter) { + err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false); + if (err) + netdev_err(dev, "Unable to remove port from SMID\n"); } err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid); diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index b3567a596fc1..80df9a5d4217 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -183,17 +183,21 @@ static int nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun, const struct tc_action *action, struct nfp_fl_pre_tunnel *pre_tun, - enum nfp_flower_tun_type tun_type) + enum nfp_flower_tun_type tun_type, + struct net_device *netdev) { size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun); struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action); u32 tmp_set_ip_tun_type_index = 0; /* Currently support one pre-tunnel so index is always 0. 
*/ int pretun_idx = 0; + struct net *net; if (ip_tun->options_len) return -EOPNOTSUPP; + net = dev_net(netdev); + set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL; set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ; @@ -204,6 +208,7 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun, set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index); set_tun->tun_id = ip_tun->key.tun_id; + set_tun->ttl = net->ipv4.sysctl_ip_default_ttl; /* Complete pre_tunnel action. */ pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst; @@ -511,7 +516,8 @@ nfp_flower_loop_action(const struct tc_action *a, *a_len += sizeof(struct nfp_fl_pre_tunnel); set_tun = (void *)&nfp_fl->action_data[*a_len]; - err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type); + err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type, + netdev); if (err) return err; *a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun); diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index 3735c09d2112..577659f332e4 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -258,9 +258,6 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb) case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS: nfp_tunnel_keep_alive(app, skb); break; - case NFP_FLOWER_CMSG_TYPE_TUN_NEIGH: - /* Acks from the NFP that the route is added - ignore. */ - break; default: nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n", type); @@ -275,18 +272,49 @@ out: void nfp_flower_cmsg_process_rx(struct work_struct *work) { + struct sk_buff_head cmsg_joined; struct nfp_flower_priv *priv; struct sk_buff *skb; priv = container_of(work, struct nfp_flower_priv, cmsg_work); + skb_queue_head_init(&cmsg_joined); + + spin_lock_bh(&priv->cmsg_skbs_high.lock); + skb_queue_splice_tail_init(&priv->cmsg_skbs_high, &cmsg_joined); + spin_unlock_bh(&priv->cmsg_skbs_high.lock); - while ((skb = skb_dequeue(&priv->cmsg_skbs))) + spin_lock_bh(&priv->cmsg_skbs_low.lock); + skb_queue_splice_tail_init(&priv->cmsg_skbs_low, &cmsg_joined); + spin_unlock_bh(&priv->cmsg_skbs_low.lock); + + while ((skb = __skb_dequeue(&cmsg_joined))) nfp_flower_cmsg_process_one_rx(priv->app, skb); } -void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb) +static void +nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type) { struct nfp_flower_priv *priv = app->priv; + struct sk_buff_head *skb_head; + + if (type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY || + type == NFP_FLOWER_CMSG_TYPE_PORT_MOD) + skb_head = &priv->cmsg_skbs_high; + else + skb_head = &priv->cmsg_skbs_low; + + if (skb_queue_len(skb_head) >= NFP_FLOWER_WORKQ_MAX_SKBS) { + nfp_flower_cmsg_warn(app, "Dropping queued control messages\n"); + dev_kfree_skb_any(skb); + return; + } + + skb_queue_tail(skb_head, skb); + schedule_work(&priv->cmsg_work); +} + +void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb) +{ struct nfp_flower_cmsg_hdr *cmsg_hdr; cmsg_hdr = nfp_flower_cmsg_get_hdr(skb); @@ -306,8 +334,10 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb) nfp_flower_process_mtu_ack(app, skb)) { /* Handle MTU acks outside wq to prevent RTNL conflict. */ dev_consume_skb_any(skb); + } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) { + /* Acks from the NFP that the route is added - ignore. 
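/*
 * Sketch of the two-priority control-message queueing introduced in the nfp
 * hunks above: port/reply messages go on a high-priority list, everything
 * else on a low-priority one, each list is capped so a flood of messages
 * cannot exhaust memory, and the worker drains the high-priority list first.
 * Fixed-size arrays stand in for the sk_buff_head lists; the cap value is
 * illustrative.
 */
#include <stdio.h>

#define MAX_QUEUED 4	/* stand-in for NFP_FLOWER_WORKQ_MAX_SKBS */

enum msg_type { MSG_PORT_MOD, MSG_PORT_REIFY, MSG_STATS, MSG_OTHER };

struct msg_queue { int msgs[MAX_QUEUED]; int len; };

static void queue_msg(struct msg_queue *high, struct msg_queue *low,
		      enum msg_type type, int id)
{
	struct msg_queue *q =
		(type == MSG_PORT_MOD || type == MSG_PORT_REIFY) ? high : low;

	if (q->len >= MAX_QUEUED) {
		printf("dropping msg %d: queue full\n", id);
		return;
	}
	q->msgs[q->len++] = id;
}

static void drain(struct msg_queue *high, struct msg_queue *low)
{
	int i;

	for (i = 0; i < high->len; i++)
		printf("processing high-prio msg %d\n", high->msgs[i]);
	for (i = 0; i < low->len; i++)
		printf("processing low-prio msg %d\n", low->msgs[i]);
	high->len = low->len = 0;
}

int main(void)
{
	struct msg_queue high = { .len = 0 }, low = { .len = 0 };

	queue_msg(&high, &low, MSG_STATS, 1);
	queue_msg(&high, &low, MSG_PORT_MOD, 2);
	queue_msg(&high, &low, MSG_OTHER, 3);
	drain(&high, &low);	/* msg 2 is handled before 1 and 3 */
	return 0;
}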
*/ + dev_consume_skb_any(skb); } else { - skb_queue_tail(&priv->cmsg_skbs, skb); - schedule_work(&priv->cmsg_work); + nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type); } } diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 96bc0e33980c..bee4367a2c38 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -108,6 +108,8 @@ #define NFP_FL_IPV4_TUNNEL_TYPE GENMASK(7, 4) #define NFP_FL_IPV4_PRE_TUN_INDEX GENMASK(2, 0) +#define NFP_FLOWER_WORKQ_MAX_SKBS 30000 + #define nfp_flower_cmsg_warn(app, fmt, args...) \ do { \ if (net_ratelimit()) \ @@ -188,7 +190,10 @@ struct nfp_fl_set_ipv4_udp_tun { __be16 reserved; __be64 tun_id __packed; __be32 tun_type_index; - __be32 extra[3]; + __be16 reserved2; + u8 ttl; + u8 reserved3; + __be32 extra[2]; }; /* Metadata with L2 (1W/4B) diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 6357e0720f43..84e3b9f5abb1 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -52,8 +52,6 @@ #define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL -#define NFP_FLOWER_FRAME_HEADROOM 158 - static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn) { return "FLOWER"; @@ -360,7 +358,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) } SET_NETDEV_DEV(repr, &priv->nn->pdev->dev); - nfp_net_get_mac_addr(app->pf, port); + nfp_net_get_mac_addr(app->pf, repr, port); cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port); err = nfp_repr_init(app, repr, @@ -519,7 +517,8 @@ static int nfp_flower_init(struct nfp_app *app) app->priv = app_priv; app_priv->app = app; - skb_queue_head_init(&app_priv->cmsg_skbs); + skb_queue_head_init(&app_priv->cmsg_skbs_high); + skb_queue_head_init(&app_priv->cmsg_skbs_low); INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx); init_waitqueue_head(&app_priv->reify_wait_queue); @@ -549,7 +548,8 @@ static void nfp_flower_clean(struct nfp_app *app) { struct nfp_flower_priv *app_priv = app->priv; - skb_queue_purge(&app_priv->cmsg_skbs); + skb_queue_purge(&app_priv->cmsg_skbs_high); + skb_queue_purge(&app_priv->cmsg_skbs_low); flush_work(&app_priv->cmsg_work); nfp_flower_metadata_cleanup(app); @@ -557,22 +557,6 @@ static void nfp_flower_clean(struct nfp_app *app) app->priv = NULL; } -static int -nfp_flower_check_mtu(struct nfp_app *app, struct net_device *netdev, - int new_mtu) -{ - /* The flower fw reserves NFP_FLOWER_FRAME_HEADROOM bytes of the - * supported max MTU to allow for appending tunnel headers. To prevent - * unexpected behaviour this needs to be accounted for. 
- */ - if (new_mtu > netdev->max_mtu - NFP_FLOWER_FRAME_HEADROOM) { - nfp_err(app->cpp, "New MTU (%d) is not valid\n", new_mtu); - return -EINVAL; - } - - return 0; -} - static bool nfp_flower_check_ack(struct nfp_flower_priv *app_priv) { bool ret; @@ -654,7 +638,6 @@ const struct nfp_app_type app_flower = { .init = nfp_flower_init, .clean = nfp_flower_clean, - .check_mtu = nfp_flower_check_mtu, .repr_change_mtu = nfp_flower_repr_change_mtu, .vnic_alloc = nfp_flower_vnic_alloc, diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index e030b3ce4510..c67e1b54c614 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -107,7 +107,10 @@ struct nfp_mtu_conf { * @mask_table: Hash table used to store masks * @flow_table: Hash table used to store flower rules * @cmsg_work: Workqueue for control messages processing - * @cmsg_skbs: List of skbs for control message processing + * @cmsg_skbs_high: List of higher priority skbs for control message + * processing + * @cmsg_skbs_low: List of lower priority skbs for control message + * processing * @nfp_mac_off_list: List of MAC addresses to offload * @nfp_mac_index_list: List of unique 8-bit indexes for non NFP netdevs * @nfp_ipv4_off_list: List of IPv4 addresses to offload @@ -136,7 +139,8 @@ struct nfp_flower_priv { DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS); DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS); struct work_struct cmsg_work; - struct sk_buff_head cmsg_skbs; + struct sk_buff_head cmsg_skbs_high; + struct sk_buff_head cmsg_skbs_low; struct list_head nfp_mac_off_list; struct list_head nfp_mac_index_list; struct list_head nfp_ipv4_off_list; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c index 2a2f2fbc8850..b9618c37403f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c @@ -69,7 +69,7 @@ int nfp_app_nic_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, if (err) return err < 0 ? err : 0; - nfp_net_get_mac_addr(app->pf, nn->port); + nfp_net_get_mac_addr(app->pf, nn->dp.netdev, nn->port); return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index add46e28212b..42211083b51f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -171,7 +171,9 @@ void nfp_net_pci_remove(struct nfp_pf *pf); int nfp_hwmon_register(struct nfp_pf *pf); void nfp_hwmon_unregister(struct nfp_pf *pf); -void nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port); +void +nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev, + struct nfp_port *port); bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index 15fa47f622aa..45cd2092e498 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -67,23 +67,26 @@ /** * nfp_net_get_mac_addr() - Get the MAC address. * @pf: NFP PF handle + * @netdev: net_device to set MAC address on * @port: NFP port structure * * First try to get the MAC address from NSP ETH table. If that * fails generate a random address. 
*/ -void nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port) +void +nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev, + struct nfp_port *port) { struct nfp_eth_table_port *eth_port; eth_port = __nfp_port_get_eth_port(port); if (!eth_port) { - eth_hw_addr_random(port->netdev); + eth_hw_addr_random(netdev); return; } - ether_addr_copy(port->netdev->dev_addr, eth_port->mac_addr); - ether_addr_copy(port->netdev->perm_addr, eth_port->mac_addr); + ether_addr_copy(netdev->dev_addr, eth_port->mac_addr); + ether_addr_copy(netdev->perm_addr, eth_port->mac_addr); } static struct nfp_eth_table_port * @@ -511,16 +514,18 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf) return PTR_ERR(mem); } - min_size = NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1); - pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats", - "net.macstats", min_size, - &pf->mac_stats_bar); - if (IS_ERR(pf->mac_stats_mem)) { - if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) { - err = PTR_ERR(pf->mac_stats_mem); - goto err_unmap_ctrl; + if (pf->eth_tbl) { + min_size = NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1); + pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats", + "net.macstats", min_size, + &pf->mac_stats_bar); + if (IS_ERR(pf->mac_stats_mem)) { + if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) { + err = PTR_ERR(pf->mac_stats_mem); + goto err_unmap_ctrl; + } + pf->mac_stats_mem = NULL; } - pf->mac_stats_mem = NULL; } pf->vf_cfg_mem = nfp_net_pf_map_rtsym(pf, "net.vfcfg", diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c index f7b958181126..cb28ac03e4ca 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c @@ -211,8 +211,11 @@ int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex) break; err = msleep_interruptible(timeout_ms); - if (err != 0) + if (err != 0) { + nfp_info(mutex->cpp, + "interrupted waiting for NFP mutex\n"); return -ERESTARTSYS; + } if (time_is_before_eq_jiffies(warn_at)) { warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index 99bb679a9801..2abee0fe3a7c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -281,8 +281,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, u32 nsp_cpp, u64 addr, if ((*reg & mask) == val) return 0; - if (msleep_interruptible(25)) - return -ERESTARTSYS; + msleep(25); if (time_after(start_time, wait_until)) return -ETIMEDOUT; diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index 27364b7572fc..b092894dd128 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -1170,7 +1170,7 @@ static void *nixge_get_nvmem_address(struct device *dev) cell = nvmem_cell_get(dev, "address"); if (IS_ERR(cell)) - return cell; + return NULL; mac = nvmem_cell_read(cell, &cell_size); nvmem_cell_put(cell); @@ -1183,7 +1183,7 @@ static int nixge_probe(struct platform_device *pdev) struct nixge_priv *priv; struct net_device *ndev; struct resource *dmares; - const char *mac_addr; + const u8 *mac_addr; int err; ndev = alloc_etherdev(sizeof(*priv)); @@ -1202,10 +1202,12 @@ static int nixge_probe(struct platform_device *pdev) ndev->max_mtu = NIXGE_JUMBO_MTU; mac_addr = nixge_get_nvmem_address(&pdev->dev); - if (mac_addr && is_valid_ether_addr(mac_addr)) + if (mac_addr && 
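/*
 * Sketch of the MAC-address handling fixed in the nixge hunk above: the
 * buffer returned by the lookup is owned by the caller and must be freed once
 * the address has been copied (or rejected), with a random address as the
 * fallback.  read_stored_mac() and the validity check are simplified
 * stand-ins for the nvmem lookup and is_valid_ether_addr().
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ETH_ALEN 6

/* pretend nvmem read: returns a freshly allocated buffer, or NULL on error */
static unsigned char *read_stored_mac(void)
{
	static const unsigned char stored[ETH_ALEN] = {
		0x02, 0x11, 0x22, 0x33, 0x44, 0x55
	};
	unsigned char *buf = malloc(ETH_ALEN);

	if (buf)
		memcpy(buf, stored, ETH_ALEN);
	return buf;
}

static int mac_is_valid(const unsigned char *mac)
{
	static const unsigned char zero[ETH_ALEN];

	/* reject all-zero addresses; the kernel helper also rejects multicast */
	return memcmp(mac, zero, ETH_ALEN) != 0;
}

static void set_dev_mac(unsigned char dev_addr[ETH_ALEN])
{
	unsigned char *mac = read_stored_mac();
	int i;

	if (mac && mac_is_valid(mac))
		memcpy(dev_addr, mac, ETH_ALEN);
	else
		for (i = 0; i < ETH_ALEN; i++)	/* "random" fallback */
			dev_addr[i] = (unsigned char)rand();
	free(mac);	/* caller owns the buffer; free(NULL) is a no-op */
}

int main(void)
{
	unsigned char addr[ETH_ALEN];

	set_dev_mac(addr);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return 0;
}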
is_valid_ether_addr(mac_addr)) { ether_addr_copy(ndev->dev_addr, mac_addr); - else + kfree(mac_addr); + } else { eth_hw_addr_random(ndev); + } priv = netdev_priv(ndev); priv->ndev = ndev; diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index e874504e8b28..8667799d0069 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -115,8 +115,7 @@ int qed_l2_alloc(struct qed_hwfn *p_hwfn) void qed_l2_setup(struct qed_hwfn *p_hwfn) { - if (p_hwfn->hw_info.personality != QED_PCI_ETH && - p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE) + if (!QED_IS_L2_PERSONALITY(p_hwfn)) return; mutex_init(&p_hwfn->p_l2_info->lock); @@ -126,8 +125,7 @@ void qed_l2_free(struct qed_hwfn *p_hwfn) { u32 i; - if (p_hwfn->hw_info.personality != QED_PCI_ETH && - p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE) + if (!QED_IS_L2_PERSONALITY(p_hwfn)) return; if (!p_hwfn->p_l2_info) diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 74fc626b1ec1..38502815d681 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -2370,7 +2370,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb) u8 flags = 0; if (unlikely(skb->ip_summed != CHECKSUM_NONE)) { - DP_INFO(cdev, "Cannot transmit a checksumed packet\n"); + DP_INFO(cdev, "Cannot transmit a checksummed packet\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 9854aa9139af..7870ae2a6f7e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -680,7 +680,7 @@ static int qed_nic_stop(struct qed_dev *cdev) tasklet_disable(p_hwfn->sp_dpc); p_hwfn->b_sp_dpc_enabled = false; DP_VERBOSE(cdev, NETIF_MSG_IFDOWN, - "Disabled sp taskelt [hwfn %d] at %p\n", + "Disabled sp tasklet [hwfn %d] at %p\n", i, p_hwfn->sp_dpc); } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index fb7c2d1562ae..6acfd43c1a4f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -848,7 +848,7 @@ int qed_roce_query_qp(struct qed_hwfn *p_hwfn, if (!(qp->resp_offloaded)) { DP_NOTICE(p_hwfn, - "The responder's qp should be offloded before requester's\n"); + "The responder's qp should be offloaded before requester's\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c index 50b142fad6b8..1900bf7e67d1 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c +++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c @@ -238,7 +238,7 @@ qede_rdma_get_free_event_node(struct qede_dev *edev) } if (!found) { - event_node = kzalloc(sizeof(*event_node), GFP_KERNEL); + event_node = kzalloc(sizeof(*event_node), GFP_ATOMIC); if (!event_node) { DP_NOTICE(edev, "qedr: Could not allocate memory for rdma work\n"); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index d33988570217..5f4e447c5dce 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -350,15 +350,16 @@ static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev) real_dev = priv->real_dev; - if (!rmnet_is_real_dev_registered(real_dev)) - return -ENODEV; - if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, 
priv->mux_id)) goto nla_put_failure; - port = rmnet_get_port_rtnl(real_dev); + if (rmnet_is_real_dev_registered(real_dev)) { + port = rmnet_get_port_rtnl(real_dev); + f.flags = port->data_format; + } else { + f.flags = 0; + } - f.flags = port->data_format; f.mask = ~0; if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f)) diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index d24b47b8e0b2..d118da5a10a2 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -2224,7 +2224,7 @@ static void rtl8139_poll_controller(struct net_device *dev) struct rtl8139_private *tp = netdev_priv(dev); const int irq = tp->pci_dev->irq; - disable_irq(irq); + disable_irq_nosync(irq); rtl8139_interrupt(irq, dev); enable_irq(irq); } diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 604ae78381ae..c7aac1fc99e8 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -4981,6 +4981,9 @@ static void rtl_pll_power_down(struct rtl8169_private *tp) static void rtl_pll_power_up(struct rtl8169_private *tp) { rtl_generic_op(tp, tp->pll_power_ops.up); + + /* give MAC/PHY some time to resume */ + msleep(20); } static void rtl_init_pll_power_ops(struct rtl8169_private *tp) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 50daad0a1482..d90a7b1f4088 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -3999,29 +3999,6 @@ static void efx_ef10_prepare_flr(struct efx_nic *efx) atomic_set(&efx->active_queues, 0); } -static bool efx_ef10_filter_equal(const struct efx_filter_spec *left, - const struct efx_filter_spec *right) -{ - if ((left->match_flags ^ right->match_flags) | - ((left->flags ^ right->flags) & - (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))) - return false; - - return memcmp(&left->outer_vid, &right->outer_vid, - sizeof(struct efx_filter_spec) - - offsetof(struct efx_filter_spec, outer_vid)) == 0; -} - -static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec) -{ - BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3); - return jhash2((const u32 *)&spec->outer_vid, - (sizeof(struct efx_filter_spec) - - offsetof(struct efx_filter_spec, outer_vid)) / 4, - 0); - /* XXX should we randomise the initval? */ -} - /* Decide whether a filter should be exclusive or else should allow * delivery to additional recipients. 
Currently we decide that * filters for specific local unicast MAC and IP addresses are @@ -4346,7 +4323,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx, goto out_unlock; match_pri = rc; - hash = efx_ef10_filter_hash(spec); + hash = efx_filter_spec_hash(spec); is_mc_recip = efx_filter_is_mc_recipient(spec); if (is_mc_recip) bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); @@ -4378,7 +4355,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx, if (!saved_spec) { if (ins_index < 0) ins_index = i; - } else if (efx_ef10_filter_equal(spec, saved_spec)) { + } else if (efx_filter_spec_equal(spec, saved_spec)) { if (spec->priority < saved_spec->priority && spec->priority != EFX_FILTER_PRI_AUTO) { rc = -EPERM; @@ -4762,28 +4739,63 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx, static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, unsigned int filter_idx) { + struct efx_filter_spec *spec, saved_spec; struct efx_ef10_filter_table *table; - struct efx_filter_spec *spec; - bool ret; + struct efx_arfs_rule *rule = NULL; + bool ret = true, force = false; + u16 arfs_id; down_read(&efx->filter_sem); table = efx->filter_state; down_write(&table->lock); spec = efx_ef10_filter_entry_spec(table, filter_idx); - if (!spec || spec->priority != EFX_FILTER_PRI_HINT) { - ret = true; + if (!spec || spec->priority != EFX_FILTER_PRI_HINT) goto out_unlock; - } - if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, - flow_id, filter_idx)) { + spin_lock_bh(&efx->rps_hash_lock); + if (!efx->rps_hash_table) { + /* In the absence of the table, we always return 0 to ARFS. */ + arfs_id = 0; + } else { + rule = efx_rps_hash_find(efx, spec); + if (!rule) + /* ARFS table doesn't know of this filter, so remove it */ + goto expire; + arfs_id = rule->arfs_id; + ret = efx_rps_check_rule(rule, filter_idx, &force); + if (force) + goto expire; + if (!ret) { + spin_unlock_bh(&efx->rps_hash_lock); + goto out_unlock; + } + } + if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, arfs_id)) ret = false; - goto out_unlock; + else if (rule) + rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING; +expire: + saved_spec = *spec; /* remove operation will kfree spec */ + spin_unlock_bh(&efx->rps_hash_lock); + /* At this point (since we dropped the lock), another thread might queue + * up a fresh insertion request (but the actual insertion will be held + * up by our possession of the filter table lock). In that case, it + * will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that + * the rule is not removed by efx_rps_hash_del() below. + */ + if (ret) + ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority, + filter_idx, true) == 0; + /* While we can't safely dereference rule (we dropped the lock), we can + * still test it for NULL. + */ + if (ret && rule) { + /* Expiring, so remove entry from ARFS table */ + spin_lock_bh(&efx->rps_hash_lock); + efx_rps_hash_del(efx, &saved_spec); + spin_unlock_bh(&efx->rps_hash_lock); } - - ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority, - filter_idx, true) == 0; out_unlock: up_write(&table->lock); up_read(&efx->filter_sem); @@ -5265,7 +5277,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, ids = vlan->uc; } - filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; + filter_flags = efx_rss_active(&efx->rss_context) ? 
EFX_FILTER_FLAG_RX_RSS : 0; /* Insert/renew filters */ for (i = 0; i < addr_count; i++) { @@ -5334,7 +5346,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, int rc; u16 *id; - filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; + filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0; efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 692dd729ee2a..a4ebd8715494 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -3027,6 +3027,10 @@ static int efx_init_struct(struct efx_nic *efx, mutex_init(&efx->mac_lock); #ifdef CONFIG_RFS_ACCEL mutex_init(&efx->rps_mutex); + spin_lock_init(&efx->rps_hash_lock); + /* Failure to allocate is not fatal, but may degrade ARFS performance */ + efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE, + sizeof(*efx->rps_hash_table), GFP_KERNEL); #endif efx->phy_op = &efx_dummy_phy_operations; efx->mdio.dev = net_dev; @@ -3070,6 +3074,10 @@ static void efx_fini_struct(struct efx_nic *efx) { int i; +#ifdef CONFIG_RFS_ACCEL + kfree(efx->rps_hash_table); +#endif + for (i = 0; i < EFX_MAX_CHANNELS; i++) kfree(efx->channel[i]); @@ -3092,6 +3100,141 @@ void efx_update_sw_stats(struct efx_nic *efx, u64 *stats) stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops); } +bool efx_filter_spec_equal(const struct efx_filter_spec *left, + const struct efx_filter_spec *right) +{ + if ((left->match_flags ^ right->match_flags) | + ((left->flags ^ right->flags) & + (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))) + return false; + + return memcmp(&left->outer_vid, &right->outer_vid, + sizeof(struct efx_filter_spec) - + offsetof(struct efx_filter_spec, outer_vid)) == 0; +} + +u32 efx_filter_spec_hash(const struct efx_filter_spec *spec) +{ + BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3); + return jhash2((const u32 *)&spec->outer_vid, + (sizeof(struct efx_filter_spec) - + offsetof(struct efx_filter_spec, outer_vid)) / 4, + 0); +} + +#ifdef CONFIG_RFS_ACCEL +bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx, + bool *force) +{ + if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) { + /* ARFS is currently updating this entry, leave it */ + return false; + } + if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) { + /* ARFS tried and failed to update this, so it's probably out + * of date. Remove the filter and the ARFS rule entry. + */ + rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING; + *force = true; + return true; + } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */ + /* ARFS has moved on, so old filter is not needed. Since we did + * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will + * not be removed by efx_rps_hash_del() subsequently. + */ + *force = true; + return true; + } + /* Remove it iff ARFS wants to. 
*/ + return true; +} + +struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx, + const struct efx_filter_spec *spec) +{ + u32 hash = efx_filter_spec_hash(spec); + + WARN_ON(!spin_is_locked(&efx->rps_hash_lock)); + if (!efx->rps_hash_table) + return NULL; + return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE]; +} + +struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx, + const struct efx_filter_spec *spec) +{ + struct efx_arfs_rule *rule; + struct hlist_head *head; + struct hlist_node *node; + + head = efx_rps_hash_bucket(efx, spec); + if (!head) + return NULL; + hlist_for_each(node, head) { + rule = container_of(node, struct efx_arfs_rule, node); + if (efx_filter_spec_equal(spec, &rule->spec)) + return rule; + } + return NULL; +} + +struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx, + const struct efx_filter_spec *spec, + bool *new) +{ + struct efx_arfs_rule *rule; + struct hlist_head *head; + struct hlist_node *node; + + head = efx_rps_hash_bucket(efx, spec); + if (!head) + return NULL; + hlist_for_each(node, head) { + rule = container_of(node, struct efx_arfs_rule, node); + if (efx_filter_spec_equal(spec, &rule->spec)) { + *new = false; + return rule; + } + } + rule = kmalloc(sizeof(*rule), GFP_ATOMIC); + *new = true; + if (rule) { + memcpy(&rule->spec, spec, sizeof(rule->spec)); + hlist_add_head(&rule->node, head); + } + return rule; +} + +void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec) +{ + struct efx_arfs_rule *rule; + struct hlist_head *head; + struct hlist_node *node; + + head = efx_rps_hash_bucket(efx, spec); + if (WARN_ON(!head)) + return; + hlist_for_each(node, head) { + rule = container_of(node, struct efx_arfs_rule, node); + if (efx_filter_spec_equal(spec, &rule->spec)) { + /* Someone already reused the entry. We know that if + * this check doesn't fire (i.e. filter_id == REMOVING) + * then the REMOVING mark was put there by our caller, + * because caller is holding a lock on filter table and + * only holders of that lock set REMOVING. + */ + if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING) + return; + hlist_del(node); + kfree(rule); + return; + } + } + /* We didn't find it. */ + WARN_ON(1); +} +#endif + /* RSS contexts. We're using linked lists and crappy O(n) algorithms, because * (a) this is an infrequent control-plane operation and (b) n is small (max 64) */ diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index a3140e16fcef..3f759ebdcf10 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -186,6 +186,27 @@ static inline void efx_filter_rfs_expire(struct work_struct *data) {} #endif bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec); +bool efx_filter_spec_equal(const struct efx_filter_spec *left, + const struct efx_filter_spec *right); +u32 efx_filter_spec_hash(const struct efx_filter_spec *spec); + +#ifdef CONFIG_RFS_ACCEL +bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx, + bool *force); + +struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx, + const struct efx_filter_spec *spec); + +/* @new is written to indicate if entry was newly added (true) or if an old + * entry was found and returned (false). 
+ */ +struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx, + const struct efx_filter_spec *spec, + bool *new); + +void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec); +#endif + /* RSS contexts */ struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx); struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id); diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 4a19c7efdf8d..c72adf8b52ea 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -2905,18 +2905,45 @@ bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, { struct efx_farch_filter_state *state = efx->filter_state; struct efx_farch_filter_table *table; - bool ret = false; + bool ret = false, force = false; + u16 arfs_id; down_write(&state->lock); + spin_lock_bh(&efx->rps_hash_lock); table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; if (test_bit(index, table->used_bitmap) && - table->spec[index].priority == EFX_FILTER_PRI_HINT && - rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id, - flow_id, index)) { - efx_farch_filter_table_clear_entry(efx, table, index); - ret = true; + table->spec[index].priority == EFX_FILTER_PRI_HINT) { + struct efx_arfs_rule *rule = NULL; + struct efx_filter_spec spec; + + efx_farch_filter_to_gen_spec(&spec, &table->spec[index]); + if (!efx->rps_hash_table) { + /* In the absence of the table, we always returned 0 to + * ARFS, so use the same to query it. + */ + arfs_id = 0; + } else { + rule = efx_rps_hash_find(efx, &spec); + if (!rule) { + /* ARFS table doesn't know of this filter, remove it */ + force = true; + } else { + arfs_id = rule->arfs_id; + if (!efx_rps_check_rule(rule, index, &force)) + goto out_unlock; + } + } + if (force || rps_may_expire_flow(efx->net_dev, spec.dmaq_id, + flow_id, arfs_id)) { + if (rule) + rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING; + efx_rps_hash_del(efx, &spec); + efx_farch_filter_table_clear_entry(efx, table, index); + ret = true; + } } - +out_unlock: + spin_unlock_bh(&efx->rps_hash_lock); up_write(&state->lock); return ret; } diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 5e379a83c729..65568925c3ef 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -733,6 +733,56 @@ struct efx_rss_context { u32 rx_indir_table[128]; }; +#ifdef CONFIG_RFS_ACCEL +/* Order of these is important, since filter_id >= %EFX_ARFS_FILTER_ID_PENDING + * is used to test if filter does or will exist. + */ +#define EFX_ARFS_FILTER_ID_PENDING -1 +#define EFX_ARFS_FILTER_ID_ERROR -2 +#define EFX_ARFS_FILTER_ID_REMOVING -3 +/** + * struct efx_arfs_rule - record of an ARFS filter and its IDs + * @node: linkage into hash table + * @spec: details of the filter (used as key for hash table). Use efx->type to + * determine which member to use. + * @rxq_index: channel to which the filter will steer traffic. + * @arfs_id: filter ID which was returned to ARFS + * @filter_id: index in software filter table. May be + * %EFX_ARFS_FILTER_ID_PENDING if filter was not inserted yet, + * %EFX_ARFS_FILTER_ID_ERROR if filter insertion failed, or + * %EFX_ARFS_FILTER_ID_REMOVING if expiry is currently removing the filter. 
+ */ +struct efx_arfs_rule { + struct hlist_node node; + struct efx_filter_spec spec; + u16 rxq_index; + u16 arfs_id; + s32 filter_id; +}; + +/* Size chosen so that the table is one page (4kB) */ +#define EFX_ARFS_HASH_TABLE_SIZE 512 + +/** + * struct efx_async_filter_insertion - Request to asynchronously insert a filter + * @net_dev: Reference to the netdevice + * @spec: The filter to insert + * @work: Workitem for this request + * @rxq_index: Identifies the channel for which this request was made + * @flow_id: Identifies the kernel-side flow for which this request was made + */ +struct efx_async_filter_insertion { + struct net_device *net_dev; + struct efx_filter_spec spec; + struct work_struct work; + u16 rxq_index; + u32 flow_id; +}; + +/* Maximum number of ARFS workitems that may be in flight on an efx_nic */ +#define EFX_RPS_MAX_IN_FLIGHT 8 +#endif /* CONFIG_RFS_ACCEL */ + /** * struct efx_nic - an Efx NIC * @name: Device name (net device name or bus id before net device registered) @@ -850,6 +900,12 @@ struct efx_rss_context { * @rps_expire_channel: Next channel to check for expiry * @rps_expire_index: Next index to check for expiry in * @rps_expire_channel's @rps_flow_id + * @rps_slot_map: bitmap of in-flight entries in @rps_slot + * @rps_slot: array of ARFS insertion requests for efx_filter_rfs_work() + * @rps_hash_lock: Protects ARFS filter mapping state (@rps_hash_table and + * @rps_next_id). + * @rps_hash_table: Mapping between ARFS filters and their various IDs + * @rps_next_id: next arfs_id for an ARFS filter * @active_queues: Count of RX and TX queues that haven't been flushed and drained. * @rxq_flush_pending: Count of number of receive queues that need to be flushed. * Decremented when the efx_flush_rx_queue() is called. @@ -1004,6 +1060,11 @@ struct efx_nic { struct mutex rps_mutex; unsigned int rps_expire_channel; unsigned int rps_expire_index; + unsigned long rps_slot_map; + struct efx_async_filter_insertion rps_slot[EFX_RPS_MAX_IN_FLIGHT]; + spinlock_t rps_hash_lock; + struct hlist_head *rps_hash_table; + u32 rps_next_id; #endif atomic_t active_queues; diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 95682831484e..d2e254f2f72b 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -827,31 +827,38 @@ MODULE_PARM_DESC(rx_refill_threshold, #ifdef CONFIG_RFS_ACCEL -/** - * struct efx_async_filter_insertion - Request to asynchronously insert a filter - * @net_dev: Reference to the netdevice - * @spec: The filter to insert - * @work: Workitem for this request - * @rxq_index: Identifies the channel for which this request was made - * @flow_id: Identifies the kernel-side flow for which this request was made - */ -struct efx_async_filter_insertion { - struct net_device *net_dev; - struct efx_filter_spec spec; - struct work_struct work; - u16 rxq_index; - u32 flow_id; -}; - static void efx_filter_rfs_work(struct work_struct *data) { struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion, work); struct efx_nic *efx = netdev_priv(req->net_dev); struct efx_channel *channel = efx_get_channel(efx, req->rxq_index); + int slot_idx = req - efx->rps_slot; + struct efx_arfs_rule *rule; + u16 arfs_id = 0; int rc; - rc = efx->type->filter_insert(efx, &req->spec, false); + rc = efx->type->filter_insert(efx, &req->spec, true); + if (rc >= 0) + rc %= efx->type->max_rx_ip_filters; + if (efx->rps_hash_table) { + spin_lock_bh(&efx->rps_hash_lock); + rule = efx_rps_hash_find(efx, &req->spec); + /* The 
rule might have already gone, if someone else's request + * for the same spec was already worked and then expired before + * we got around to our work. In that case we have nothing + * tying us to an arfs_id, meaning that as soon as the filter + * is considered for expiry it will be removed. + */ + if (rule) { + if (rc < 0) + rule->filter_id = EFX_ARFS_FILTER_ID_ERROR; + else + rule->filter_id = rc; + arfs_id = rule->arfs_id; + } + spin_unlock_bh(&efx->rps_hash_lock); + } if (rc >= 0) { /* Remember this so we can check whether to expire the filter * later. @@ -863,23 +870,23 @@ static void efx_filter_rfs_work(struct work_struct *data) if (req->spec.ether_type == htons(ETH_P_IP)) netif_info(efx, rx_status, efx->net_dev, - "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n", + "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n", (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", req->spec.rem_host, ntohs(req->spec.rem_port), req->spec.loc_host, ntohs(req->spec.loc_port), - req->rxq_index, req->flow_id, rc); + req->rxq_index, req->flow_id, rc, arfs_id); else netif_info(efx, rx_status, efx->net_dev, - "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n", + "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n", (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", req->spec.rem_host, ntohs(req->spec.rem_port), req->spec.loc_host, ntohs(req->spec.loc_port), - req->rxq_index, req->flow_id, rc); + req->rxq_index, req->flow_id, rc, arfs_id); } /* Release references */ + clear_bit(slot_idx, &efx->rps_slot_map); dev_put(req->net_dev); - kfree(req); } int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, @@ -887,23 +894,39 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, { struct efx_nic *efx = netdev_priv(net_dev); struct efx_async_filter_insertion *req; + struct efx_arfs_rule *rule; struct flow_keys fk; + int slot_idx; + bool new; + int rc; - if (flow_id == RPS_FLOW_ID_INVALID) - return -EINVAL; + /* find a free slot */ + for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++) + if (!test_and_set_bit(slot_idx, &efx->rps_slot_map)) + break; + if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT) + return -EBUSY; - if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) - return -EPROTONOSUPPORT; + if (flow_id == RPS_FLOW_ID_INVALID) { + rc = -EINVAL; + goto out_clear; + } - if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) - return -EPROTONOSUPPORT; - if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) - return -EPROTONOSUPPORT; + if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) { + rc = -EPROTONOSUPPORT; + goto out_clear; + } - req = kmalloc(sizeof(*req), GFP_ATOMIC); - if (!req) - return -ENOMEM; + if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) { + rc = -EPROTONOSUPPORT; + goto out_clear; + } + if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) { + rc = -EPROTONOSUPPORT; + goto out_clear; + } + req = efx->rps_slot + slot_idx; efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT, efx->rx_scatter ? 
EFX_FILTER_FLAG_RX_SCATTER : 0, rxq_index); @@ -927,12 +950,45 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, req->spec.rem_port = fk.ports.src; req->spec.loc_port = fk.ports.dst; + if (efx->rps_hash_table) { + /* Add it to ARFS hash table */ + spin_lock(&efx->rps_hash_lock); + rule = efx_rps_hash_add(efx, &req->spec, &new); + if (!rule) { + rc = -ENOMEM; + goto out_unlock; + } + if (new) + rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER; + rc = rule->arfs_id; + /* Skip if existing or pending filter already does the right thing */ + if (!new && rule->rxq_index == rxq_index && + rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING) + goto out_unlock; + rule->rxq_index = rxq_index; + rule->filter_id = EFX_ARFS_FILTER_ID_PENDING; + spin_unlock(&efx->rps_hash_lock); + } else { + /* Without an ARFS hash table, we just use arfs_id 0 for all + * filters. This means if multiple flows hash to the same + * flow_id, all but the most recently touched will be eligible + * for expiry. + */ + rc = 0; + } + + /* Queue the request */ dev_hold(req->net_dev = net_dev); INIT_WORK(&req->work, efx_filter_rfs_work); req->rxq_index = rxq_index; req->flow_id = flow_id; schedule_work(&req->work); - return 0; + return rc; +out_unlock: + spin_unlock(&efx->rps_hash_lock); +out_clear: + clear_bit(slot_idx, &efx->rps_slot_map); + return rc; } bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index c7bff596c665..dedd40613090 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -347,7 +347,7 @@ enum power_event { #define MTL_RX_OVERFLOW_INT BIT(16) /* Default operating mode of the MAC */ -#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | GMAC_CONFIG_ACS | \ +#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | \ GMAC_CONFIG_BE | GMAC_CONFIG_DCRS) /* To dump the core regs excluding the Address Registers */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index a3af92ebbca8..517b1f6736a8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -31,13 +31,6 @@ static void dwmac4_core_init(struct mac_device_info *hw, value |= GMAC_CORE_INIT; - /* Clear ACS bit because Ethernet switch tagging formats such as - * Broadcom tags can look like invalid LLC/SNAP packets and cause the - * hardware to truncate packets on reception. - */ - if (netdev_uses_dsa(dev)) - value &= ~GMAC_CONFIG_ACS; - if (mtu > 1500) value |= GMAC_CONFIG_2K; if (mtu > 2000) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 9a16931ce39d..b65e2d144698 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3495,8 +3495,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 * Type frames (LLC/LLC-SNAP) + * + * llc_snap is never checked in GMAC >= 4, so this ACS + * feature is always disabled and packets need to be + * stripped manually. 
*/ - if (unlikely(status != llc_snap)) + if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) || + unlikely(status != llc_snap)) frame_len -= ETH_FCS_LEN; if (netif_msg_rx_status(priv)) { diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index f081de4f38d7..88c12474a0c3 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -3443,7 +3443,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np, len = (val & RCR_ENTRY_L2_LEN) >> RCR_ENTRY_L2_LEN_SHIFT; - len -= ETH_FCS_LEN; + append_size = len + ETH_HLEN + ETH_FCS_LEN; addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << RCR_ENTRY_PKT_BUF_ADDR_SHIFT; @@ -3453,7 +3453,6 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np, RCR_ENTRY_PKTBUFSZ_SHIFT]; off = addr & ~PAGE_MASK; - append_size = rcr_size; if (num_rcr == 1) { int ptype; @@ -3466,7 +3465,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np, else skb_checksum_none_assert(skb); } else if (!(val & RCR_ENTRY_MULTI)) - append_size = len - skb->len; + append_size = append_size - skb->len; niu_rx_skb_append(skb, page, off, append_size, rcr_size); if ((page->index + rp->rbr_block_size) - rcr_size == addr) { diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 30371274409d..28d893b93d30 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -129,7 +129,7 @@ do { \ #define RX_PRIORITY_MAPPING 0x76543210 #define TX_PRIORITY_MAPPING 0x33221100 -#define CPDMA_TX_PRIORITY_MAP 0x01234567 +#define CPDMA_TX_PRIORITY_MAP 0x76543210 #define CPSW_VLAN_AWARE BIT(1) #define CPSW_RX_VLAN_ENCAP BIT(2) @@ -1340,6 +1340,8 @@ static inline void cpsw_add_dual_emac_def_ale_entries( cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM, ALE_VLAN | ALE_SECURE, slave->port_vlan); + cpsw_ale_control_set(cpsw->ale, slave_port, + ALE_PORT_DROP_UNKNOWN_VLAN, 1); } static void soft_reset_slave(struct cpsw_slave *slave) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index ecc84954c511..da07ccdf84bf 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1840,7 +1840,8 @@ static int netvsc_vf_join(struct net_device *vf_netdev, goto rx_handler_failed; } - ret = netdev_upper_dev_link(vf_netdev, ndev, NULL); + ret = netdev_master_upper_dev_link(vf_netdev, ndev, + NULL, NULL, NULL); if (ret != 0) { netdev_err(vf_netdev, "can not set master device %s (err = %d)\n", diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 6b127be781d9..e7ca5b5f39ed 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1288,7 +1288,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, rndis_device->link_state ? 
"down" : "up"); if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5) - return net_device; + goto out; rndis_filter_query_link_speed(rndis_device, net_device); diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c index 9fb9b565a002..4f684cbcdc57 100644 --- a/drivers/net/ieee802154/atusb.c +++ b/drivers/net/ieee802154/atusb.c @@ -1045,7 +1045,7 @@ static int atusb_probe(struct usb_interface *interface, atusb->tx_dr.bRequest = ATUSB_TX; atusb->tx_dr.wValue = cpu_to_le16(0); - atusb->tx_urb = usb_alloc_urb(0, GFP_ATOMIC); + atusb->tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!atusb->tx_urb) goto fail; diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c index 55a22c761808..de0d7f28a181 100644 --- a/drivers/net/ieee802154/mcr20a.c +++ b/drivers/net/ieee802154/mcr20a.c @@ -1267,7 +1267,7 @@ mcr20a_probe(struct spi_device *spi) ret = mcr20a_get_platform_data(spi, pdata); if (ret < 0) { dev_crit(&spi->dev, "mcr20a_get_platform_data failed.\n"); - return ret; + goto free_pdata; } /* init reset gpio */ @@ -1275,7 +1275,7 @@ mcr20a_probe(struct spi_device *spi) ret = devm_gpio_request_one(&spi->dev, pdata->rst_gpio, GPIOF_OUT_INIT_HIGH, "reset"); if (ret) - return ret; + goto free_pdata; } /* reset mcr20a */ @@ -1291,7 +1291,8 @@ mcr20a_probe(struct spi_device *spi) hw = ieee802154_alloc_hw(sizeof(*lp), &mcr20a_hw_ops); if (!hw) { dev_crit(&spi->dev, "ieee802154_alloc_hw failed\n"); - return -ENOMEM; + ret = -ENOMEM; + goto free_pdata; } /* init mcr20a local data */ @@ -1308,8 +1309,10 @@ mcr20a_probe(struct spi_device *spi) /* init buf */ lp->buf = devm_kzalloc(&spi->dev, SPI_COMMAND_BUFFER, GFP_KERNEL); - if (!lp->buf) - return -ENOMEM; + if (!lp->buf) { + ret = -ENOMEM; + goto free_dev; + } mcr20a_setup_tx_spi_messages(lp); mcr20a_setup_rx_spi_messages(lp); @@ -1366,6 +1369,8 @@ mcr20a_probe(struct spi_device *spi) free_dev: ieee802154_free_hw(lp->hw); +free_pdata: + kfree(pdata); return ret; } diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 9cbb0c8a896a..7de88b33d5b9 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -3277,7 +3277,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev, err = netdev_upper_dev_link(real_dev, dev, extack); if (err < 0) - goto put_dev; + goto unregister; /* need to be already registered so that ->init has run and * the MAC addr is set @@ -3316,8 +3316,7 @@ del_dev: macsec_del_dev(macsec); unlink: netdev_upper_dev_unlink(real_dev, dev); -put_dev: - dev_put(real_dev); +unregister: unregister_netdevice(dev); return err; } diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 3bb6b66dc7bf..f9c25912eb98 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -720,6 +720,15 @@ static struct phy_driver broadcom_drivers[] = { .get_strings = bcm_phy_get_strings, .get_stats = bcm53xx_phy_get_stats, .probe = bcm53xx_phy_probe, +}, { + .phy_id = PHY_ID_BCM89610, + .phy_id_mask = 0xfffffff0, + .name = "Broadcom BCM89610", + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_init = bcm54xx_config_init, + .ack_interrupt = bcm_phy_ack_intr, + .config_intr = bcm_phy_config_intr, } }; module_phy_driver(broadcom_drivers); @@ -741,6 +750,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = { { PHY_ID_BCMAC131, 0xfffffff0 }, { PHY_ID_BCM5241, 0xfffffff0 }, { PHY_ID_BCM5395, 0xfffffff0 }, + { PHY_ID_BCM89610, 0xfffffff0 }, { } }; diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 
c22e8e383247..25e2a099b71c 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -1393,6 +1393,15 @@ static int m88e1318_set_wol(struct phy_device *phydev, if (err < 0) goto error; + /* If WOL event happened once, the LED[2] interrupt pin + * will not be cleared unless we reading the interrupt status + * register. If interrupts are in use, the normal interrupt + * handling will clear the WOL event. Clear the WOL event + * before enabling it if !phy_interrupt_is_valid() + */ + if (!phy_interrupt_is_valid(phydev)) + phy_read(phydev, MII_M1011_IEVENT); + /* Enable the WOL interrupt */ err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0, MII_88E1318S_PHY_CSIER_WOL_EIE); diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c index 0f293ef28935..a97ac8c12c4c 100644 --- a/drivers/net/phy/microchip.c +++ b/drivers/net/phy/microchip.c @@ -20,6 +20,7 @@ #include <linux/ethtool.h> #include <linux/phy.h> #include <linux/microchipphy.h> +#include <linux/delay.h> #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>" #define DRIVER_DESC "Microchip LAN88XX PHY driver" @@ -30,6 +31,16 @@ struct lan88xx_priv { __u32 wolopts; }; +static int lan88xx_read_page(struct phy_device *phydev) +{ + return __phy_read(phydev, LAN88XX_EXT_PAGE_ACCESS); +} + +static int lan88xx_write_page(struct phy_device *phydev, int page) +{ + return __phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, page); +} + static int lan88xx_phy_config_intr(struct phy_device *phydev) { int rc; @@ -66,6 +77,150 @@ static int lan88xx_suspend(struct phy_device *phydev) return 0; } +static int lan88xx_TR_reg_set(struct phy_device *phydev, u16 regaddr, + u32 data) +{ + int val, save_page, ret = 0; + u16 buf; + + /* Save current page */ + save_page = phy_save_page(phydev); + if (save_page < 0) { + pr_warn("Failed to get current page\n"); + goto err; + } + + /* Switch to TR page */ + lan88xx_write_page(phydev, LAN88XX_EXT_PAGE_ACCESS_TR); + + ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_LOW_DATA, + (data & 0xFFFF)); + if (ret < 0) { + pr_warn("Failed to write TR low data\n"); + goto err; + } + + ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_HIGH_DATA, + (data & 0x00FF0000) >> 16); + if (ret < 0) { + pr_warn("Failed to write TR high data\n"); + goto err; + } + + /* Config control bits [15:13] of register */ + buf = (regaddr & ~(0x3 << 13));/* Clr [14:13] to write data in reg */ + buf |= 0x8000; /* Set [15] to Packet transmit */ + + ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_CR, buf); + if (ret < 0) { + pr_warn("Failed to write data in reg\n"); + goto err; + } + + usleep_range(1000, 2000);/* Wait for Data to be written */ + val = __phy_read(phydev, LAN88XX_EXT_PAGE_TR_CR); + if (!(val & 0x8000)) + pr_warn("TR Register[0x%X] configuration failed\n", regaddr); +err: + return phy_restore_page(phydev, save_page, ret); +} + +static void lan88xx_config_TR_regs(struct phy_device *phydev) +{ + int err; + + /* Get access to Channel 0x1, Node 0xF , Register 0x01. + * Write 24-bit value 0x12B00A to register. Setting MrvlTrFix1000Kf, + * MrvlTrFix1000Kp, MasterEnableTR bits. + */ + err = lan88xx_TR_reg_set(phydev, 0x0F82, 0x12B00A); + if (err < 0) + pr_warn("Failed to Set Register[0x0F82]\n"); + + /* Get access to Channel b'10, Node b'1101, Register 0x06. + * Write 24-bit value 0xD2C46F to register. Setting SSTrKf1000Slv, + * SSTrKp1000Mas bits. 
+ */ + err = lan88xx_TR_reg_set(phydev, 0x168C, 0xD2C46F); + if (err < 0) + pr_warn("Failed to Set Register[0x168C]\n"); + + /* Get access to Channel b'10, Node b'1111, Register 0x11. + * Write 24-bit value 0x620 to register. Setting rem_upd_done_thresh + * bits + */ + err = lan88xx_TR_reg_set(phydev, 0x17A2, 0x620); + if (err < 0) + pr_warn("Failed to Set Register[0x17A2]\n"); + + /* Get access to Channel b'10, Node b'1101, Register 0x10. + * Write 24-bit value 0xEEFFDD to register. Setting + * eee_TrKp1Long_1000, eee_TrKp2Long_1000, eee_TrKp3Long_1000, + * eee_TrKp1Short_1000,eee_TrKp2Short_1000, eee_TrKp3Short_1000 bits. + */ + err = lan88xx_TR_reg_set(phydev, 0x16A0, 0xEEFFDD); + if (err < 0) + pr_warn("Failed to Set Register[0x16A0]\n"); + + /* Get access to Channel b'10, Node b'1101, Register 0x13. + * Write 24-bit value 0x071448 to register. Setting + * slv_lpi_tr_tmr_val1, slv_lpi_tr_tmr_val2 bits. + */ + err = lan88xx_TR_reg_set(phydev, 0x16A6, 0x071448); + if (err < 0) + pr_warn("Failed to Set Register[0x16A6]\n"); + + /* Get access to Channel b'10, Node b'1101, Register 0x12. + * Write 24-bit value 0x13132F to register. Setting + * slv_sigdet_timer_val1, slv_sigdet_timer_val2 bits. + */ + err = lan88xx_TR_reg_set(phydev, 0x16A4, 0x13132F); + if (err < 0) + pr_warn("Failed to Set Register[0x16A4]\n"); + + /* Get access to Channel b'10, Node b'1101, Register 0x14. + * Write 24-bit value 0x0 to register. Setting eee_3level_delay, + * eee_TrKf_freeze_delay bits. + */ + err = lan88xx_TR_reg_set(phydev, 0x16A8, 0x0); + if (err < 0) + pr_warn("Failed to Set Register[0x16A8]\n"); + + /* Get access to Channel b'01, Node b'1111, Register 0x34. + * Write 24-bit value 0x91B06C to register. Setting + * FastMseSearchThreshLong1000, FastMseSearchThreshShort1000, + * FastMseSearchUpdGain1000 bits. + */ + err = lan88xx_TR_reg_set(phydev, 0x0FE8, 0x91B06C); + if (err < 0) + pr_warn("Failed to Set Register[0x0FE8]\n"); + + /* Get access to Channel b'01, Node b'1111, Register 0x3E. + * Write 24-bit value 0xC0A028 to register. Setting + * FastMseKp2ThreshLong1000, FastMseKp2ThreshShort1000, + * FastMseKp2UpdGain1000, FastMseKp2ExitEn1000 bits. + */ + err = lan88xx_TR_reg_set(phydev, 0x0FFC, 0xC0A028); + if (err < 0) + pr_warn("Failed to Set Register[0x0FFC]\n"); + + /* Get access to Channel b'01, Node b'1111, Register 0x35. + * Write 24-bit value 0x041600 to register. Setting + * FastMseSearchPhShNum1000, FastMseSearchClksPerPh1000, + * FastMsePhChangeDelay1000 bits. + */ + err = lan88xx_TR_reg_set(phydev, 0x0FEA, 0x041600); + if (err < 0) + pr_warn("Failed to Set Register[0x0FEA]\n"); + + /* Get access to Channel b'10, Node b'1101, Register 0x03. + * Write 24-bit value 0x000004 to register. Setting TrFreeze bits. 
+ */ + err = lan88xx_TR_reg_set(phydev, 0x1686, 0x000004); + if (err < 0) + pr_warn("Failed to Set Register[0x1686]\n"); +} + static int lan88xx_probe(struct phy_device *phydev) { struct device *dev = &phydev->mdio.dev; @@ -132,6 +287,25 @@ static void lan88xx_set_mdix(struct phy_device *phydev) phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0); } +static int lan88xx_config_init(struct phy_device *phydev) +{ + int val; + + genphy_config_init(phydev); + /*Zerodetect delay enable */ + val = phy_read_mmd(phydev, MDIO_MMD_PCS, + PHY_ARDENNES_MMD_DEV_3_PHY_CFG); + val |= PHY_ARDENNES_MMD_DEV_3_PHY_CFG_ZD_DLY_EN_; + + phy_write_mmd(phydev, MDIO_MMD_PCS, PHY_ARDENNES_MMD_DEV_3_PHY_CFG, + val); + + /* Config DSP registers */ + lan88xx_config_TR_regs(phydev); + + return 0; +} + static int lan88xx_config_aneg(struct phy_device *phydev) { lan88xx_set_mdix(phydev); @@ -151,7 +325,7 @@ static struct phy_driver microchip_phy_driver[] = { .probe = lan88xx_probe, .remove = lan88xx_remove, - .config_init = genphy_config_init, + .config_init = lan88xx_config_init, .config_aneg = lan88xx_config_aneg, .ack_interrupt = lan88xx_phy_ack_interrupt, @@ -160,6 +334,8 @@ static struct phy_driver microchip_phy_driver[] = { .suspend = lan88xx_suspend, .resume = genphy_resume, .set_wol = lan88xx_set_wol, + .read_page = lan88xx_read_page, + .write_page = lan88xx_write_page, } }; module_phy_driver(microchip_phy_driver); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index ac23322a32e1..9e4ba8e80a18 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -535,8 +535,17 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id, /* Grab the bits from PHYIR1, and put them in the upper half */ phy_reg = mdiobus_read(bus, addr, MII_PHYSID1); - if (phy_reg < 0) + if (phy_reg < 0) { + /* if there is no device, return without an error so scanning + * the bus works properly + */ + if (phy_reg == -EIO || phy_reg == -ENODEV) { + *phy_id = 0xffffffff; + return 0; + } + return -EIO; + } *phy_id = (phy_reg & 0xffff) << 16; diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index 0381da78d228..fd6c23f69c2f 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -125,7 +125,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, if (id->base.br_nominal) { if (id->base.br_nominal != 255) { br_nom = id->base.br_nominal * 100; - br_min = br_nom + id->base.br_nominal * id->ext.br_min; + br_min = br_nom - id->base.br_nominal * id->ext.br_min; br_max = br_nom + id->base.br_nominal * id->ext.br_max; } else if (id->ext.br_max) { br_nom = 250 * id->ext.br_max; diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 1483bc7b01e1..7df07337d69c 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -620,6 +620,10 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, lock_sock(sk); error = -EINVAL; + + if (sockaddr_len != sizeof(struct sockaddr_pppox)) + goto end; + if (sp->sa_protocol != PX_PROTO_OE) goto end; diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index a6c6ce19eeee..ddb6bf85a59c 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -261,6 +261,17 @@ static void __team_option_inst_mark_removed_port(struct team *team, } } +static bool __team_option_inst_tmp_find(const struct list_head *opts, + const struct team_option_inst *needle) +{ + struct team_option_inst *opt_inst; + + list_for_each_entry(opt_inst, opts, tmp_list) + if 
(opt_inst == needle) + return true; + return false; +} + static int __team_options_register(struct team *team, const struct team_option *option, size_t option_count) @@ -1061,14 +1072,11 @@ static void team_port_leave(struct team *team, struct team_port *port) } #ifdef CONFIG_NET_POLL_CONTROLLER -static int team_port_enable_netpoll(struct team *team, struct team_port *port) +static int __team_port_enable_netpoll(struct team_port *port) { struct netpoll *np; int err; - if (!team->dev->npinfo) - return 0; - np = kzalloc(sizeof(*np), GFP_KERNEL); if (!np) return -ENOMEM; @@ -1082,6 +1090,14 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port) return err; } +static int team_port_enable_netpoll(struct team_port *port) +{ + if (!port->team->dev->npinfo) + return 0; + + return __team_port_enable_netpoll(port); +} + static void team_port_disable_netpoll(struct team_port *port) { struct netpoll *np = port->np; @@ -1096,7 +1112,7 @@ static void team_port_disable_netpoll(struct team_port *port) kfree(np); } #else -static int team_port_enable_netpoll(struct team *team, struct team_port *port) +static int team_port_enable_netpoll(struct team_port *port) { return 0; } @@ -1210,7 +1226,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev, goto err_vids_add; } - err = team_port_enable_netpoll(team, port); + err = team_port_enable_netpoll(port); if (err) { netdev_err(dev, "Failed to enable netpoll on device %s\n", portname); @@ -1907,7 +1923,7 @@ static int team_netpoll_setup(struct net_device *dev, mutex_lock(&team->lock); list_for_each_entry(port, &team->port_list, list) { - err = team_port_enable_netpoll(team, port); + err = __team_port_enable_netpoll(port); if (err) { __team_netpoll_cleanup(team); break; @@ -2568,6 +2584,14 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) if (err) goto team_put; opt_inst->changed = true; + + /* dumb/evil user-space can send us duplicate opt, + * keep only the last one + */ + if (__team_option_inst_tmp_find(&opt_inst_list, + opt_inst)) + continue; + list_add(&opt_inst->tmp_list, &opt_inst_list); } if (!opt_found) { diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 28583aa0c17d..ef33950a45d9 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1102,12 +1102,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) goto drop; len = run_ebpf_filter(tun, skb, len); - - /* Trim extra bytes since we may insert vlan proto & TCI - * in tun_put_user(). - */ - len -= skb_vlan_tag_present(skb) ? 
sizeof(struct veth) : 0; - if (len <= 0 || pskb_trim(skb, len)) + if (len == 0 || pskb_trim(skb, len)) goto drop; if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index ca066b785e9f..42565dd33aa6 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1098,6 +1098,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x05c6, 0x9080, 8)}, {QMI_FIXED_INTF(0x05c6, 0x9083, 3)}, {QMI_FIXED_INTF(0x05c6, 0x9084, 4)}, + {QMI_FIXED_INTF(0x05c6, 0x90b2, 3)}, /* ublox R410M */ {QMI_FIXED_INTF(0x05c6, 0x920d, 0)}, {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */ @@ -1107,6 +1108,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */ {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */ {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */ + {QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */ {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */ {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */ @@ -1342,6 +1344,18 @@ static int qmi_wwan_probe(struct usb_interface *intf, id->driver_info = (unsigned long)&qmi_wwan_info; } + /* There are devices where the same interface number can be + * configured as different functions. We should only bind to + * vendor specific functions when matching on interface number + */ + if (id->match_flags & USB_DEVICE_ID_MATCH_INT_NUMBER && + desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) { + dev_dbg(&intf->dev, + "Rejecting interface number match for class %02x\n", + desc->bInterfaceClass); + return -ENODEV; + } + /* Quectel EC20 quirk where we've QMI on interface 4 instead of 0 */ if (quectel_ec20_detected(intf) && desc->bInterfaceNumber == 0) { dev_dbg(&intf->dev, "Quectel EC20 quirk, skipping interface 0\n"); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 7b187ec7411e..770422e953f7 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -147,6 +147,17 @@ struct receive_queue { struct xdp_rxq_info xdp_rxq; }; +/* Control VQ buffers: protected by the rtnl lock */ +struct control_buf { + struct virtio_net_ctrl_hdr hdr; + virtio_net_ctrl_ack status; + struct virtio_net_ctrl_mq mq; + u8 promisc; + u8 allmulti; + __virtio16 vid; + __virtio64 offloads; +}; + struct virtnet_info { struct virtio_device *vdev; struct virtqueue *cvq; @@ -192,14 +203,7 @@ struct virtnet_info { struct hlist_node node; struct hlist_node node_dead; - /* Control VQ buffers: protected by the rtnl lock */ - struct virtio_net_ctrl_hdr ctrl_hdr; - virtio_net_ctrl_ack ctrl_status; - struct virtio_net_ctrl_mq ctrl_mq; - u8 ctrl_promisc; - u8 ctrl_allmulti; - u16 ctrl_vid; - u64 ctrl_offloads; + struct control_buf *ctrl; /* Ethtool settings */ u8 duplex; @@ -1269,7 +1273,9 @@ static int virtnet_poll(struct napi_struct *napi, int budget) { struct receive_queue *rq = container_of(napi, struct receive_queue, napi); - unsigned int received; + struct virtnet_info *vi = rq->vq->vdev->priv; + struct send_queue *sq; + unsigned int received, qp; bool xdp_xmit = false; virtnet_poll_cleantx(rq); @@ -1280,8 +1286,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget) if (received < budget) virtqueue_napi_complete(napi, rq->vq, received); - if (xdp_xmit) + if (xdp_xmit) { + qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + + smp_processor_id(); + sq = 
&vi->sq[qp]; + virtqueue_kick(sq->vq); xdp_do_flush_map(); + } return received; } @@ -1454,25 +1465,25 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, /* Caller should know better */ BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); - vi->ctrl_status = ~0; - vi->ctrl_hdr.class = class; - vi->ctrl_hdr.cmd = cmd; + vi->ctrl->status = ~0; + vi->ctrl->hdr.class = class; + vi->ctrl->hdr.cmd = cmd; /* Add header */ - sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr)); + sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr)); sgs[out_num++] = &hdr; if (out) sgs[out_num++] = out; /* Add return status. */ - sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status)); + sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); sgs[out_num] = &stat; BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); if (unlikely(!virtqueue_kick(vi->cvq))) - return vi->ctrl_status == VIRTIO_NET_OK; + return vi->ctrl->status == VIRTIO_NET_OK; /* Spin for a response, the kick causes an ioport write, trapping * into the hypervisor, so the request should be handled immediately. @@ -1481,7 +1492,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, !virtqueue_is_broken(vi->cvq)) cpu_relax(); - return vi->ctrl_status == VIRTIO_NET_OK; + return vi->ctrl->status == VIRTIO_NET_OK; } static int virtnet_set_mac_address(struct net_device *dev, void *p) @@ -1593,8 +1604,8 @@ static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) return 0; - vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); - sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq)); + vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); + sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { @@ -1653,22 +1664,22 @@ static void virtnet_set_rx_mode(struct net_device *dev) if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) return; - vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0); - vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0); + vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); + vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); - sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc)); + sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_PROMISC, sg)) dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", - vi->ctrl_promisc ? "en" : "dis"); + vi->ctrl->promisc ? "en" : "dis"); - sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti)); + sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", - vi->ctrl_allmulti ? "en" : "dis"); + vi->ctrl->allmulti ? 
"en" : "dis"); uc_count = netdev_uc_count(dev); mc_count = netdev_mc_count(dev); @@ -1714,8 +1725,8 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev, struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; - vi->ctrl_vid = vid; - sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); + vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); + sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_ADD, &sg)) @@ -1729,8 +1740,8 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; - vi->ctrl_vid = vid; - sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); + vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); + sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_DEL, &sg)) @@ -2126,9 +2137,9 @@ static int virtnet_restore_up(struct virtio_device *vdev) static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) { struct scatterlist sg; - vi->ctrl_offloads = cpu_to_virtio64(vi->vdev, offloads); + vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads); - sg_init_one(&sg, &vi->ctrl_offloads, sizeof(vi->ctrl_offloads)); + sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) { @@ -2351,6 +2362,7 @@ static void virtnet_free_queues(struct virtnet_info *vi) kfree(vi->rq); kfree(vi->sq); + kfree(vi->ctrl); } static void _free_receive_bufs(struct virtnet_info *vi) @@ -2543,6 +2555,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi) { int i; + vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL); + if (!vi->ctrl) + goto err_ctrl; vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); if (!vi->sq) goto err_sq; @@ -2571,6 +2586,8 @@ static int virtnet_alloc_queues(struct virtnet_info *vi) err_rq: kfree(vi->sq); err_sq: + kfree(vi->ctrl); +err_ctrl: return -ENOMEM; } diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index e04937f44f33..9ebe2a689966 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1218,6 +1218,7 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb, union { void *ptr; struct ethhdr *eth; + struct vlan_ethhdr *veth; struct iphdr *ipv4; struct ipv6hdr *ipv6; struct tcphdr *tcp; @@ -1228,16 +1229,24 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb, if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen)) return 0; + if (skb->protocol == cpu_to_be16(ETH_P_8021Q) || + skb->protocol == cpu_to_be16(ETH_P_8021AD)) + hlen = sizeof(struct vlan_ethhdr); + else + hlen = sizeof(struct ethhdr); + hdr.eth = eth_hdr(skb); if (gdesc->rcd.v4) { - BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP)); - hdr.ptr += sizeof(struct ethhdr); + BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) && + hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP)); + hdr.ptr += hlen; BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP); hlen = hdr.ipv4->ihl << 2; hdr.ptr += hdr.ipv4->ihl << 2; } else if (gdesc->rcd.v6) { - BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6)); - hdr.ptr += sizeof(struct ethhdr); + BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) && + hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6)); + hdr.ptr += hlen; /* Use an estimated value, since we also need to handle * TSO case. 
*/ diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 59ec34052a65..a3326463b71f 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -69,10 +69,10 @@ /* * Version numbers */ -#define VMXNET3_DRIVER_VERSION_STRING "1.4.13.0-k" +#define VMXNET3_DRIVER_VERSION_STRING "1.4.14.0-k" /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ -#define VMXNET3_DRIVER_VERSION_NUM 0x01040d00 +#define VMXNET3_DRIVER_VERSION_NUM 0x01040e00 #if defined(CONFIG_PCI_MSI) /* RSS only makes sense if MSI-X is supported. */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index 9277f4c2bfeb..94e177d7c9b5 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -459,7 +459,7 @@ static void brcmf_fw_free_request(struct brcmf_fw_request *req) kfree(req); } -static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx) +static int brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx) { struct brcmf_fw *fwctx = ctx; struct brcmf_fw_item *cur; @@ -498,13 +498,10 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx) brcmf_dbg(TRACE, "nvram %p len %d\n", nvram, nvram_length); cur->nv_data.data = nvram; cur->nv_data.len = nvram_length; - return; + return 0; fail: - brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev)); - fwctx->done(fwctx->dev, -ENOENT, NULL); - brcmf_fw_free_request(fwctx->req); - kfree(fwctx); + return -ENOENT; } static int brcmf_fw_request_next_item(struct brcmf_fw *fwctx, bool async) @@ -553,20 +550,27 @@ static void brcmf_fw_request_done(const struct firmware *fw, void *ctx) brcmf_dbg(TRACE, "enter: firmware %s %sfound\n", cur->path, fw ? "" : "not "); - if (fw) { - if (cur->type == BRCMF_FW_TYPE_BINARY) - cur->binary = fw; - else if (cur->type == BRCMF_FW_TYPE_NVRAM) - brcmf_fw_request_nvram_done(fw, fwctx); - else - release_firmware(fw); - } else if (cur->type == BRCMF_FW_TYPE_NVRAM) { - brcmf_fw_request_nvram_done(NULL, fwctx); - } else if (!(cur->flags & BRCMF_FW_REQF_OPTIONAL)) { + if (!fw) ret = -ENOENT; + + switch (cur->type) { + case BRCMF_FW_TYPE_NVRAM: + ret = brcmf_fw_request_nvram_done(fw, fwctx); + break; + case BRCMF_FW_TYPE_BINARY: + cur->binary = fw; + break; + default: + /* something fishy here so bail out early */ + brcmf_err("unknown fw type: %d\n", cur->type); + release_firmware(fw); + ret = -EINVAL; goto fail; } + if (ret < 0 && !(cur->flags & BRCMF_FW_REQF_OPTIONAL)) + goto fail; + do { if (++fwctx->curpos == fwctx->req->n_items) { ret = 0; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h index 7af3a0f51b77..a17c4a79b8d4 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -749,13 +750,9 @@ struct iwl_scan_req_umac { } __packed; #define IWL_SCAN_REQ_UMAC_SIZE_V8 sizeof(struct iwl_scan_req_umac) -#define IWL_SCAN_REQ_UMAC_SIZE_V7 (sizeof(struct iwl_scan_req_umac) - \ - 4 * sizeof(u8)) -#define IWL_SCAN_REQ_UMAC_SIZE_V6 (sizeof(struct iwl_scan_req_umac) - \ - 2 * sizeof(u8) - sizeof(__le16)) -#define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \ - 2 * sizeof(__le32) - 2 * sizeof(u8) - \ - sizeof(__le16)) +#define IWL_SCAN_REQ_UMAC_SIZE_V7 48 +#define IWL_SCAN_REQ_UMAC_SIZE_V6 44 +#define IWL_SCAN_REQ_UMAC_SIZE_V1 36 /** * struct iwl_umac_scan_abort diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 8928613e033e..ca0174680af9 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -76,6 +76,7 @@ #include "iwl-io.h" #include "iwl-csr.h" #include "fw/acpi.h" +#include "fw/api/nvm-reg.h" /* NVM offsets (in words) definitions */ enum nvm_offsets { @@ -146,8 +147,8 @@ static const u8 iwl_ext_nvm_channels[] = { 149, 153, 157, 161, 165, 169, 173, 177, 181 }; -#define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels) -#define IWL_NUM_CHANNELS_EXT ARRAY_SIZE(iwl_ext_nvm_channels) +#define IWL_NVM_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels) +#define IWL_NVM_NUM_CHANNELS_EXT ARRAY_SIZE(iwl_ext_nvm_channels) #define NUM_2GHZ_CHANNELS 14 #define NUM_2GHZ_CHANNELS_EXT 14 #define FIRST_2GHZ_HT_MINUS 5 @@ -301,11 +302,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, const u8 *nvm_chan; if (cfg->nvm_type != IWL_NVM_EXT) { - num_of_ch = IWL_NUM_CHANNELS; + num_of_ch = IWL_NVM_NUM_CHANNELS; nvm_chan = &iwl_nvm_channels[0]; num_2ghz_channels = NUM_2GHZ_CHANNELS; } else { - num_of_ch = IWL_NUM_CHANNELS_EXT; + num_of_ch = IWL_NVM_NUM_CHANNELS_EXT; nvm_chan = &iwl_ext_nvm_channels[0]; num_2ghz_channels = NUM_2GHZ_CHANNELS_EXT; } @@ -720,12 +721,12 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, if (cfg->nvm_type != IWL_NVM_EXT) data = kzalloc(sizeof(*data) + sizeof(struct ieee80211_channel) * - IWL_NUM_CHANNELS, + IWL_NVM_NUM_CHANNELS, GFP_KERNEL); else data = kzalloc(sizeof(*data) + sizeof(struct ieee80211_channel) * - IWL_NUM_CHANNELS_EXT, + IWL_NVM_NUM_CHANNELS_EXT, GFP_KERNEL); if (!data) return NULL; @@ -842,24 +843,34 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan, return flags; } +struct regdb_ptrs { + struct ieee80211_wmm_rule *rule; + u32 token; +}; + struct ieee80211_regdomain * iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, - int num_of_ch, __le32 *channels, u16 fw_mcc) + int num_of_ch, __le32 *channels, u16 fw_mcc, + u16 geo_info) { int ch_idx; u16 ch_flags; u32 reg_rule_flags, prev_reg_rule_flags = 0; const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ? 
iwl_ext_nvm_channels : iwl_nvm_channels; - struct ieee80211_regdomain *regd; - int size_of_regd; + struct ieee80211_regdomain *regd, *copy_rd; + int size_of_regd, regd_to_copy, wmms_to_copy; + int size_of_wmms = 0; struct ieee80211_reg_rule *rule; + struct ieee80211_wmm_rule *wmm_rule, *d_wmm, *s_wmm; + struct regdb_ptrs *regdb_ptrs; enum nl80211_band band; int center_freq, prev_center_freq = 0; - int valid_rules = 0; + int valid_rules = 0, n_wmms = 0; + int i; bool new_rule; int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ? - IWL_NUM_CHANNELS_EXT : IWL_NUM_CHANNELS; + IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS; if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES)) return ERR_PTR(-EINVAL); @@ -875,10 +886,26 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, sizeof(struct ieee80211_regdomain) + num_of_ch * sizeof(struct ieee80211_reg_rule); - regd = kzalloc(size_of_regd, GFP_KERNEL); + if (geo_info & GEO_WMM_ETSI_5GHZ_INFO) + size_of_wmms = + num_of_ch * sizeof(struct ieee80211_wmm_rule); + + regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL); if (!regd) return ERR_PTR(-ENOMEM); + regdb_ptrs = kcalloc(num_of_ch, sizeof(*regdb_ptrs), GFP_KERNEL); + if (!regdb_ptrs) { + copy_rd = ERR_PTR(-ENOMEM); + goto out; + } + + /* set alpha2 from FW. */ + regd->alpha2[0] = fw_mcc >> 8; + regd->alpha2[1] = fw_mcc & 0xff; + + wmm_rule = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd); + for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { ch_flags = (u16)__le32_to_cpup(channels + ch_idx); band = (ch_idx < NUM_2GHZ_CHANNELS) ? @@ -927,14 +954,66 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, iwl_nvm_print_channel_flags(dev, IWL_DL_LAR, nvm_chan[ch_idx], ch_flags); + + if (!(geo_info & GEO_WMM_ETSI_5GHZ_INFO) || + band == NL80211_BAND_2GHZ) + continue; + + if (!reg_query_regdb_wmm(regd->alpha2, center_freq, + ®db_ptrs[n_wmms].token, wmm_rule)) { + /* Add only new rules */ + for (i = 0; i < n_wmms; i++) { + if (regdb_ptrs[i].token == + regdb_ptrs[n_wmms].token) { + rule->wmm_rule = regdb_ptrs[i].rule; + break; + } + } + if (i == n_wmms) { + rule->wmm_rule = wmm_rule; + regdb_ptrs[n_wmms++].rule = wmm_rule; + wmm_rule++; + } + } } regd->n_reg_rules = valid_rules; + regd->n_wmm_rules = n_wmms; - /* set alpha2 from FW. */ - regd->alpha2[0] = fw_mcc >> 8; - regd->alpha2[1] = fw_mcc & 0xff; + /* + * Narrow down regdom for unused regulatory rules to prevent hole + * between reg rules to wmm rules. 
+ */ + regd_to_copy = sizeof(struct ieee80211_regdomain) + + valid_rules * sizeof(struct ieee80211_reg_rule); + + wmms_to_copy = sizeof(struct ieee80211_wmm_rule) * n_wmms; + + copy_rd = kzalloc(regd_to_copy + wmms_to_copy, GFP_KERNEL); + if (!copy_rd) { + copy_rd = ERR_PTR(-ENOMEM); + goto out; + } + + memcpy(copy_rd, regd, regd_to_copy); + memcpy((u8 *)copy_rd + regd_to_copy, (u8 *)regd + size_of_regd, + wmms_to_copy); + + d_wmm = (struct ieee80211_wmm_rule *)((u8 *)copy_rd + regd_to_copy); + s_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd); + + for (i = 0; i < regd->n_reg_rules; i++) { + if (!regd->reg_rules[i].wmm_rule) + continue; + + copy_rd->reg_rules[i].wmm_rule = d_wmm + + (regd->reg_rules[i].wmm_rule - s_wmm) / + sizeof(struct ieee80211_wmm_rule); + } - return regd; +out: + kfree(regdb_ptrs); + kfree(regd); + return copy_rd; } IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index 306736c7a042..3071a23b7606 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -101,12 +101,14 @@ void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, * * This function parses the regulatory channel data received as a * MCC_UPDATE_CMD command. It returns a newly allocation regulatory domain, - * to be fed into the regulatory core. An ERR_PTR is returned on error. + * to be fed into the regulatory core. In case the geo_info is set handle + * accordingly. An ERR_PTR is returned on error. * If not given to the regulatory core, the user is responsible for freeing * the regdomain returned here with kfree. */ struct ieee80211_regdomain * iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, - int num_of_ch, __le32 *channels, u16 fw_mcc); + int num_of_ch, __le32 *channels, u16 fw_mcc, + u16 geo_info); #endif /* __iwl_nvm_parse_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 51b30424575b..90f8c89ea59c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -311,7 +311,8 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, __le32_to_cpu(resp->n_channels), resp->channels, - __le16_to_cpu(resp->mcc)); + __le16_to_cpu(resp->mcc), + __le16_to_cpu(resp->geo_info)); /* Store the return source id */ src_id = resp->source_id; kfree(resp); diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 96d26cfae90b..4a017a0d71ea 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -3236,6 +3236,7 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) GENL_SET_ERR_MSG(info,"MAC is no valid source addr"); NL_SET_BAD_ATTR(info->extack, info->attrs[HWSIM_ATTR_PERM_ADDR]); + kfree(hwname); return -EINVAL; } diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c index 8b6b07a936f5..b026e80940a4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c @@ -158,16 +158,6 @@ static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist) static u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv) { - struct 
rtl_mod_params *mod_params = rtlpriv->cfg->mod_params; - - /* override ant_num / ant_path */ - if (mod_params->ant_sel) { - rtlpriv->btcoexist.btc_info.ant_num = - (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1); - - rtlpriv->btcoexist.btc_info.single_ant_path = - (mod_params->ant_sel == 1 ? 0 : 1); - } return rtlpriv->btcoexist.btc_info.single_ant_path; } @@ -178,7 +168,6 @@ static u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv) static u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv) { - struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params; u8 num; if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2) @@ -186,10 +175,6 @@ static u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv) else num = 1; - /* override ant_num / ant_path */ - if (mod_params->ant_sel) - num = (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1) + 1; - return num; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index e7bbbc95cdb1..b4f3f91b590e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -848,6 +848,9 @@ static bool _rtl8723be_init_mac(struct ieee80211_hw *hw) return false; } + if (rtlpriv->cfg->ops->get_btc_status()) + rtlpriv->btcoexist.btc_ops->btc_power_on_setting(rtlpriv); + bytetmp = rtl_read_byte(rtlpriv, REG_MULTI_FUNC_CTRL); rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL, bytetmp | BIT(3)); @@ -2696,21 +2699,21 @@ void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B; rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1); rtlpriv->btcoexist.btc_info.single_ant_path = - (value & 0x40); /*0xc3[6]*/ + (value & 0x40 ? ANT_AUX : ANT_MAIN); /*0xc3[6]*/ } else { rtlpriv->btcoexist.btc_info.btcoexist = 0; rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B; rtlpriv->btcoexist.btc_info.ant_num = ANT_X2; - rtlpriv->btcoexist.btc_info.single_ant_path = 0; + rtlpriv->btcoexist.btc_info.single_ant_path = ANT_MAIN; } /* override ant_num / ant_path */ if (mod_params->ant_sel) { rtlpriv->btcoexist.btc_info.ant_num = - (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1); + (mod_params->ant_sel == 1 ? ANT_X1 : ANT_X2); rtlpriv->btcoexist.btc_info.single_ant_path = - (mod_params->ant_sel == 1 ? 0 : 1); + (mod_params->ant_sel == 1 ? 
ANT_AUX : ANT_MAIN); } } diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index d27e33960e77..ce1754054a07 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -2823,6 +2823,11 @@ enum bt_ant_num { ANT_X1 = 1, }; +enum bt_ant_path { + ANT_MAIN = 0, + ANT_AUX = 1, +}; + enum bt_co_type { BT_2WIRE = 0, BT_ISSC_3WIRE = 1, diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig index 85997184e047..9d36473dc2a2 100644 --- a/drivers/nvdimm/Kconfig +++ b/drivers/nvdimm/Kconfig @@ -103,8 +103,7 @@ config NVDIMM_DAX Select Y if unsure config OF_PMEM - # FIXME: make tristate once OF_NUMA dependency removed - bool "Device-tree support for persistent memory regions" + tristate "Device-tree support for persistent memory regions" depends on OF default LIBNVDIMM help diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index e00d45522b80..8d348b22ba45 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -88,9 +88,9 @@ int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd) int nvdimm_init_config_data(struct nvdimm_drvdata *ndd) { struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev); + int rc = validate_dimm(ndd), cmd_rc = 0; struct nd_cmd_get_config_data_hdr *cmd; struct nvdimm_bus_descriptor *nd_desc; - int rc = validate_dimm(ndd); u32 max_cmd_size, config_size; size_t offset; @@ -124,9 +124,11 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd) cmd->in_offset = offset; rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev), ND_CMD_GET_CONFIG_DATA, cmd, - cmd->in_length + sizeof(*cmd), NULL); - if (rc || cmd->status) { - rc = -ENXIO; + cmd->in_length + sizeof(*cmd), &cmd_rc); + if (rc < 0) + break; + if (cmd_rc < 0) { + rc = cmd_rc; break; } memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length); @@ -140,9 +142,9 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd) int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, void *buf, size_t len) { - int rc = validate_dimm(ndd); size_t max_cmd_size, buf_offset; struct nd_cmd_set_config_hdr *cmd; + int rc = validate_dimm(ndd), cmd_rc = 0; struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev); struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; @@ -164,7 +166,6 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, for (buf_offset = 0; len; len -= cmd->in_length, buf_offset += cmd->in_length) { size_t cmd_size; - u32 *status; cmd->in_offset = offset + buf_offset; cmd->in_length = min(max_cmd_size, len); @@ -172,12 +173,13 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, /* status is output in the last 4-bytes of the command buffer */ cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32); - status = ((void *) cmd) + cmd_size - sizeof(u32); rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev), - ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, NULL); - if (rc || *status) { - rc = rc ? 
rc : -ENXIO; + ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc); + if (rc < 0) + break; + if (cmd_rc < 0) { + rc = cmd_rc; break; } } diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c index 85013bad35de..0a701837dfc0 100644 --- a/drivers/nvdimm/of_pmem.c +++ b/drivers/nvdimm/of_pmem.c @@ -67,7 +67,7 @@ static int of_pmem_region_probe(struct platform_device *pdev) */ memset(&ndr_desc, 0, sizeof(ndr_desc)); ndr_desc.attr_groups = region_attr_groups; - ndr_desc.numa_node = of_node_to_nid(np); + ndr_desc.numa_node = dev_to_node(&pdev->dev); ndr_desc.res = &pdev->resource[i]; ndr_desc.of_node = np; set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags); diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index b979cf3bce65..88a8b5916624 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig @@ -27,7 +27,7 @@ config NVME_FABRICS config NVME_RDMA tristate "NVM Express over Fabrics RDMA host driver" - depends on INFINIBAND && BLOCK + depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK select NVME_CORE select NVME_FABRICS select SG_POOL diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 9df4f71e58ca..99b857e5a7a9 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -99,6 +99,7 @@ static struct class *nvme_subsys_class; static void nvme_ns_remove(struct nvme_ns *ns); static int nvme_revalidate_disk(struct gendisk *disk); +static void nvme_put_subsystem(struct nvme_subsystem *subsys); int nvme_reset_ctrl(struct nvme_ctrl *ctrl) { @@ -117,7 +118,8 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl) ret = nvme_reset_ctrl(ctrl); if (!ret) { flush_work(&ctrl->reset_work); - if (ctrl->state != NVME_CTRL_LIVE) + if (ctrl->state != NVME_CTRL_LIVE && + ctrl->state != NVME_CTRL_ADMIN_ONLY) ret = -ENETRESET; } @@ -350,6 +352,7 @@ static void nvme_free_ns_head(struct kref *ref) ida_simple_remove(&head->subsys->ns_ida, head->instance); list_del_init(&head->entry); cleanup_srcu_struct(&head->srcu); + nvme_put_subsystem(head->subsys); kfree(head); } @@ -764,6 +767,7 @@ static int nvme_submit_user_cmd(struct request_queue *q, ret = PTR_ERR(meta); goto out_unmap; } + req->cmd_flags |= REQ_INTEGRITY; } } @@ -2860,6 +2864,9 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, goto out_cleanup_srcu; list_add_tail(&head->entry, &ctrl->subsys->nsheads); + + kref_get(&ctrl->subsys->ref); + return head; out_cleanup_srcu: cleanup_srcu_struct(&head->srcu); @@ -2997,31 +3004,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) if (nvme_init_ns_head(ns, nsid, id)) goto out_free_id; nvme_setup_streams_ns(ctrl, ns); - -#ifdef CONFIG_NVME_MULTIPATH - /* - * If multipathing is enabled we need to always use the subsystem - * instance number for numbering our devices to avoid conflicts - * between subsystems that have multiple controllers and thus use - * the multipath-aware subsystem node and those that have a single - * controller and use the controller node directly. - */ - if (ns->head->disk) { - sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance, - ctrl->cntlid, ns->head->instance); - flags = GENHD_FL_HIDDEN; - } else { - sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance, - ns->head->instance); - } -#else - /* - * But without the multipath code enabled, multiple controller per - * subsystems are visible as devices and thus we cannot use the - * subsystem instance. 
- */ - sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance); -#endif + nvme_set_disk_name(disk_name, ns, ctrl, &flags); if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) { if (nvme_nvm_register(ns, disk_name, node)) { diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 124c458806df..7ae732a77fe8 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -668,6 +668,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ret = -ENOMEM; goto out; } + kfree(opts->transport); opts->transport = p; break; case NVMF_OPT_NQN: @@ -676,6 +677,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ret = -ENOMEM; goto out; } + kfree(opts->subsysnqn); opts->subsysnqn = p; nqnlen = strlen(opts->subsysnqn); if (nqnlen >= NVMF_NQN_SIZE) { @@ -698,6 +700,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ret = -ENOMEM; goto out; } + kfree(opts->traddr); opts->traddr = p; break; case NVMF_OPT_TRSVCID: @@ -706,6 +709,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ret = -ENOMEM; goto out; } + kfree(opts->trsvcid); opts->trsvcid = p; break; case NVMF_OPT_QUEUE_SIZE: @@ -792,6 +796,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ret = -EINVAL; goto out; } + nvmf_host_put(opts->host); opts->host = nvmf_host_add(p); kfree(p); if (!opts->host) { @@ -817,6 +822,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ret = -ENOMEM; goto out; } + kfree(opts->host_traddr); opts->host_traddr = p; break; case NVMF_OPT_HOST_ID: diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 956e0b8e9c4d..d7b664ae5923 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -15,10 +15,32 @@ #include "nvme.h" static bool multipath = true; -module_param(multipath, bool, 0644); +module_param(multipath, bool, 0444); MODULE_PARM_DESC(multipath, "turn on native support for multiple controllers per subsystem"); +/* + * If multipathing is enabled we need to always use the subsystem instance + * number for numbering our devices to avoid conflicts between subsystems that + * have multiple controllers and thus use the multipath-aware subsystem node + * and those that have a single controller and use the controller node + * directly. + */ +void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, + struct nvme_ctrl *ctrl, int *flags) +{ + if (!multipath) { + sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance); + } else if (ns->head->disk) { + sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance, + ctrl->cntlid, ns->head->instance); + *flags = GENHD_FL_HIDDEN; + } else { + sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance, + ns->head->instance); + } +} + void nvme_failover_req(struct request *req) { struct nvme_ns *ns = req->q->queuedata; diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 061fecfd44f5..17d2f7cf3fed 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -84,6 +84,11 @@ enum nvme_quirks { * Supports the LighNVM command set if indicated in vs[1]. 
*/ NVME_QUIRK_LIGHTNVM = (1 << 6), + + /* + * Set MEDIUM priority on SQ creation + */ + NVME_QUIRK_MEDIUM_PRIO_SQ = (1 << 7), }; /* @@ -436,6 +441,8 @@ extern const struct attribute_group nvme_ns_id_attr_group; extern const struct block_device_operations nvme_ns_head_ops; #ifdef CONFIG_NVME_MULTIPATH +void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, + struct nvme_ctrl *ctrl, int *flags); void nvme_failover_req(struct request *req); bool nvme_req_needs_failover(struct request *req, blk_status_t error); void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl); @@ -461,6 +468,16 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns) } #else +/* + * Without the multipath code enabled, multiple controller per subsystems are + * visible as devices and thus we cannot use the subsystem instance. + */ +static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, + struct nvme_ctrl *ctrl, int *flags) +{ + sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance); +} + static inline void nvme_failover_req(struct request *req) { } diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index fbc71fac6f1e..17a0190bd88f 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1093,10 +1093,19 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, struct nvme_queue *nvmeq) { + struct nvme_ctrl *ctrl = &dev->ctrl; struct nvme_command c; int flags = NVME_QUEUE_PHYS_CONTIG; /* + * Some drives have a bug that auto-enables WRRU if MEDIUM isn't + * set. Since URGENT priority is zeroes, it makes all queues + * URGENT. + */ + if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ) + flags |= NVME_SQ_PRIO_MEDIUM; + + /* * Note: we (ab)use the fact that the prp fields survive if no data * is attached to the request. 
*/ @@ -2701,7 +2710,8 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_STRIPE_SIZE | NVME_QUIRK_DEALLOCATE_ZEROES, }, { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */ - .driver_data = NVME_QUIRK_NO_DEEPEST_PS }, + .driver_data = NVME_QUIRK_NO_DEEPEST_PS | + NVME_QUIRK_MEDIUM_PRIO_SQ }, { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ .driver_data = NVME_QUIRK_IDENTIFY_CNS, }, { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig index 5f4f8b16685f..3c7b61ddb0d1 100644 --- a/drivers/nvme/target/Kconfig +++ b/drivers/nvme/target/Kconfig @@ -27,7 +27,7 @@ config NVME_TARGET_LOOP config NVME_TARGET_RDMA tristate "NVMe over Fabrics RDMA target support" - depends on INFINIBAND + depends on INFINIBAND && INFINIBAND_ADDR_TRANS depends on NVME_TARGET select SGL_ALLOC help diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 31fdfba556a8..27a8561c0cb9 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -469,6 +469,12 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work) nvme_stop_ctrl(&ctrl->ctrl); nvme_loop_shutdown_ctrl(ctrl); + if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { + /* state change failure should never happen */ + WARN_ON_ONCE(1); + return; + } + ret = nvme_loop_configure_admin_queue(ctrl); if (ret) goto out_disable; diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 84aa9d676375..6da20b9688f7 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -942,7 +942,7 @@ int __init early_init_dt_scan_chosen_stdout(void) int offset; const char *p, *q, *options = NULL; int l; - const struct earlycon_id *match; + const struct earlycon_id **p_match; const void *fdt = initial_boot_params; offset = fdt_path_offset(fdt, "/chosen"); @@ -969,7 +969,10 @@ int __init early_init_dt_scan_chosen_stdout(void) return 0; } - for (match = __earlycon_table; match < __earlycon_table_end; match++) { + for (p_match = __earlycon_table; p_match < __earlycon_table_end; + p_match++) { + const struct earlycon_id *match = *p_match; + if (!match->compatible[0]) continue; diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index b35fe88f1851..7baa53e5b1d7 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@ -102,12 +102,28 @@ static DEFINE_IDR(ovcs_idr); static BLOCKING_NOTIFIER_HEAD(overlay_notify_chain); +/** + * of_overlay_notifier_register() - Register notifier for overlay operations + * @nb: Notifier block to register + * + * Register for notification on overlay operations on device tree nodes. The + * reported actions definied by @of_reconfig_change. The notifier callback + * furthermore receives a pointer to the affected device tree node. + * + * Note that a notifier callback is not supposed to store pointers to a device + * tree node or its content beyond @OF_OVERLAY_POST_REMOVE corresponding to the + * respective node it received. 
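(Illustrative aside, not part of the patch: a minimal sketch of a notifier consumer under the policy stated above. The foo_* names are hypothetical; the action values and struct of_overlay_notify_data are the existing definitions from include/linux/of.h.)

#include <linux/of.h>
#include <linux/notifier.h>

static int foo_overlay_notify(struct notifier_block *nb,
			      unsigned long action, void *arg)
{
	struct of_overlay_notify_data *nd = arg;

	switch (action) {
	case OF_OVERLAY_POST_APPLY:
		/* Inspect nd->overlay / nd->target here, but do not cache
		 * these pointers past OF_OVERLAY_POST_REMOVE: the overlay
		 * tree and FDT copy are freed in free_overlay_changeset()
		 * below. */
		break;
	case OF_OVERLAY_POST_REMOVE:
		/* Drop any references taken at apply time. */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block foo_overlay_nb = {
	.notifier_call = foo_overlay_notify,
};

/* during init: of_overlay_notifier_register(&foo_overlay_nb); */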
+ */ int of_overlay_notifier_register(struct notifier_block *nb) { return blocking_notifier_chain_register(&overlay_notify_chain, nb); } EXPORT_SYMBOL_GPL(of_overlay_notifier_register); +/** + * of_overlay_notifier_register() - Unregister notifier for overlay operations + * @nb: Notifier block to unregister + */ int of_overlay_notifier_unregister(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&overlay_notify_chain, nb); @@ -671,17 +687,13 @@ static void free_overlay_changeset(struct overlay_changeset *ovcs) of_node_put(ovcs->fragments[i].overlay); } kfree(ovcs->fragments); - /* - * TODO - * - * would like to: kfree(ovcs->overlay_tree); - * but can not since drivers may have pointers into this data - * - * would like to: kfree(ovcs->fdt); - * but can not since drivers may have pointers into this data + * There should be no live pointers into ovcs->overlay_tree and + * ovcs->fdt due to the policy that overlay notifiers are not allowed + * to retain pointers into the overlay devicetree. */ - + kfree(ovcs->overlay_tree); + kfree(ovcs->fdt); kfree(ovcs); } diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index acba1f56af3e..126cf19e869b 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -1263,7 +1263,7 @@ static struct parisc_driver ccio_driver __refdata = { * I/O Page Directory, the resource map, and initalizing the * U2/Uturn chip into virtual mode. */ -static void +static void __init ccio_ioc_init(struct ioc *ioc) { int i; diff --git a/drivers/pci/dwc/pcie-kirin.c b/drivers/pci/dwc/pcie-kirin.c index a6b88c7f6e3e..d2970a009eb5 100644 --- a/drivers/pci/dwc/pcie-kirin.c +++ b/drivers/pci/dwc/pcie-kirin.c @@ -486,7 +486,7 @@ static int kirin_pcie_probe(struct platform_device *pdev) return ret; kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node, - "reset-gpio", 0); + "reset-gpios", 0); if (kirin_pcie->gpio_id_reset < 0) return -ENODEV; diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c index b04d37b3c5de..9abf549631b4 100644 --- a/drivers/pci/host/pci-aardvark.c +++ b/drivers/pci/host/pci-aardvark.c @@ -29,6 +29,7 @@ #define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5 #define PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE (0 << 11) #define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT 12 +#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ 0x2 #define PCIE_CORE_LINK_CTRL_STAT_REG 0xd0 #define PCIE_CORE_LINK_L0S_ENTRY BIT(0) #define PCIE_CORE_LINK_TRAINING BIT(5) @@ -100,7 +101,8 @@ #define PCIE_ISR1_MASK_REG (CONTROL_BASE_ADDR + 0x4C) #define PCIE_ISR1_POWER_STATE_CHANGE BIT(4) #define PCIE_ISR1_FLUSH BIT(5) -#define PCIE_ISR1_ALL_MASK GENMASK(5, 4) +#define PCIE_ISR1_INTX_ASSERT(val) BIT(8 + (val)) +#define PCIE_ISR1_ALL_MASK GENMASK(11, 4) #define PCIE_MSI_ADDR_LOW_REG (CONTROL_BASE_ADDR + 0x50) #define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54) #define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58) @@ -172,8 +174,6 @@ #define PCIE_CONFIG_WR_TYPE0 0xa #define PCIE_CONFIG_WR_TYPE1 0xb -/* PCI_BDF shifts 8bit, so we need extra 4bit shift */ -#define PCIE_BDF(dev) (dev << 4) #define PCIE_CONF_BUS(bus) (((bus) & 0xff) << 20) #define PCIE_CONF_DEV(dev) (((dev) & 0x1f) << 15) #define PCIE_CONF_FUNC(fun) (((fun) & 0x7) << 12) @@ -296,7 +296,8 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie) reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE | (7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) | PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE | - PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT; + 
(PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ << + PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT); advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG); /* Program PCIe Control 2 to disable strict ordering */ @@ -437,7 +438,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, u32 reg; int ret; - if (PCI_SLOT(devfn) != 0) { + if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) { *val = 0xffffffff; return PCIBIOS_DEVICE_NOT_FOUND; } @@ -456,7 +457,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, advk_writel(pcie, reg, PIO_CTRL); /* Program the address registers */ - reg = PCIE_BDF(devfn) | PCIE_CONF_REG(where); + reg = PCIE_CONF_ADDR(bus->number, devfn, where); advk_writel(pcie, reg, PIO_ADDR_LS); advk_writel(pcie, 0, PIO_ADDR_MS); @@ -491,7 +492,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn, int offset; int ret; - if (PCI_SLOT(devfn) != 0) + if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) return PCIBIOS_DEVICE_NOT_FOUND; if (where % size) @@ -609,9 +610,9 @@ static void advk_pcie_irq_mask(struct irq_data *d) irq_hw_number_t hwirq = irqd_to_hwirq(d); u32 mask; - mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); - mask |= PCIE_ISR0_INTX_ASSERT(hwirq); - advk_writel(pcie, mask, PCIE_ISR0_MASK_REG); + mask = advk_readl(pcie, PCIE_ISR1_MASK_REG); + mask |= PCIE_ISR1_INTX_ASSERT(hwirq); + advk_writel(pcie, mask, PCIE_ISR1_MASK_REG); } static void advk_pcie_irq_unmask(struct irq_data *d) @@ -620,9 +621,9 @@ static void advk_pcie_irq_unmask(struct irq_data *d) irq_hw_number_t hwirq = irqd_to_hwirq(d); u32 mask; - mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); - mask &= ~PCIE_ISR0_INTX_ASSERT(hwirq); - advk_writel(pcie, mask, PCIE_ISR0_MASK_REG); + mask = advk_readl(pcie, PCIE_ISR1_MASK_REG); + mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq); + advk_writel(pcie, mask, PCIE_ISR1_MASK_REG); } static int advk_pcie_irq_map(struct irq_domain *h, @@ -765,29 +766,35 @@ static void advk_pcie_handle_msi(struct advk_pcie *pcie) static void advk_pcie_handle_int(struct advk_pcie *pcie) { - u32 val, mask, status; + u32 isr0_val, isr0_mask, isr0_status; + u32 isr1_val, isr1_mask, isr1_status; int i, virq; - val = advk_readl(pcie, PCIE_ISR0_REG); - mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); - status = val & ((~mask) & PCIE_ISR0_ALL_MASK); + isr0_val = advk_readl(pcie, PCIE_ISR0_REG); + isr0_mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); + isr0_status = isr0_val & ((~isr0_mask) & PCIE_ISR0_ALL_MASK); + + isr1_val = advk_readl(pcie, PCIE_ISR1_REG); + isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG); + isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK); - if (!status) { - advk_writel(pcie, val, PCIE_ISR0_REG); + if (!isr0_status && !isr1_status) { + advk_writel(pcie, isr0_val, PCIE_ISR0_REG); + advk_writel(pcie, isr1_val, PCIE_ISR1_REG); return; } /* Process MSI interrupts */ - if (status & PCIE_ISR0_MSI_INT_PENDING) + if (isr0_status & PCIE_ISR0_MSI_INT_PENDING) advk_pcie_handle_msi(pcie); /* Process legacy interrupts */ for (i = 0; i < PCI_NUM_INTX; i++) { - if (!(status & PCIE_ISR0_INTX_ASSERT(i))) + if (!(isr1_status & PCIE_ISR1_INTX_ASSERT(i))) continue; - advk_writel(pcie, PCIE_ISR0_INTX_ASSERT(i), - PCIE_ISR0_REG); + advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i), + PCIE_ISR1_REG); virq = irq_find_mapping(pcie->irq_domain, i); generic_handle_irq(virq); diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 6ace47099fc5..b9a131137e64 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -958,10 +958,11 @@ static int 
pci_pm_freeze(struct device *dev) * devices should not be touched during freeze/thaw transitions, * however. */ - if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND)) + if (!dev_pm_smart_suspend_and_suspended(dev)) { pm_runtime_resume(dev); + pci_dev->state_saved = false; + } - pci_dev->state_saved = false; if (pm->freeze) { int error; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index e597655a5643..dbfe7c4f3776 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1910,7 +1910,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable) EXPORT_SYMBOL(pci_pme_active); /** - * pci_enable_wake - enable PCI device as wakeup event source + * __pci_enable_wake - enable PCI device as wakeup event source * @dev: PCI device affected * @state: PCI state from which device will issue wakeup events * @enable: True to enable event generation; false to disable @@ -1928,7 +1928,7 @@ EXPORT_SYMBOL(pci_pme_active); * Error code depending on the platform is returned if both the platform and * the native mechanism fail to enable the generation of wake-up events */ -int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) +static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) { int ret = 0; @@ -1969,6 +1969,23 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) return ret; } + +/** + * pci_enable_wake - change wakeup settings for a PCI device + * @pci_dev: Target device + * @state: PCI state from which device will issue wakeup events + * @enable: Whether or not to enable event generation + * + * If @enable is set, check device_may_wakeup() for the device before calling + * __pci_enable_wake() for it. + */ +int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable) +{ + if (enable && !device_may_wakeup(&pci_dev->dev)) + return -EINVAL; + + return __pci_enable_wake(pci_dev, state, enable); +} EXPORT_SYMBOL(pci_enable_wake); /** @@ -1981,9 +1998,9 @@ EXPORT_SYMBOL(pci_enable_wake); * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI * ordering constraints. * - * This function only returns error code if the device is not capable of - * generating PME# from both D3_hot and D3_cold, and the platform is unable to - * enable wake-up power for it. + * This function only returns error code if the device is not allowed to wake + * up the system from sleep or it is not capable of generating PME# from both + * D3_hot and D3_cold and the platform is unable to enable wake-up power for it. 
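(Illustrative aside, not part of the patch: with pci_enable_wake() now checking device_may_wakeup() itself, a legacy suspend hook in a hypothetical driver passes the user's wake-up preference explicitly, for example:)

#include <linux/pci.h>

static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	/* Enabling wake-up now fails with -EINVAL unless userspace has
	 * allowed it via device_may_wakeup(); disabling is always OK. */
	pci_enable_wake(pdev, PCI_D3hot, device_may_wakeup(&pdev->dev));
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}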
*/ int pci_wake_from_d3(struct pci_dev *dev, bool enable) { @@ -2114,7 +2131,7 @@ int pci_finish_runtime_suspend(struct pci_dev *dev) dev->runtime_d3cold = target_state == PCI_D3cold; - pci_enable_wake(dev, target_state, pci_dev_run_wake(dev)); + __pci_enable_wake(dev, target_state, pci_dev_run_wake(dev)); error = pci_set_power_state(dev, target_state); @@ -2138,16 +2155,16 @@ bool pci_dev_run_wake(struct pci_dev *dev) { struct pci_bus *bus = dev->bus; - if (device_can_wakeup(&dev->dev)) - return true; - if (!dev->pme_support) return false; /* PME-capable in principle, but not from the target power state */ - if (!pci_pme_capable(dev, pci_target_state(dev, false))) + if (!pci_pme_capable(dev, pci_target_state(dev, true))) return false; + if (device_can_wakeup(&dev->dev)) + return true; + while (bus->parent) { struct pci_dev *bridge = bus->self; @@ -5273,11 +5290,11 @@ void pcie_print_link_status(struct pci_dev *dev) bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width); if (bw_avail >= bw_cap) - pci_info(dev, "%u.%03u Gb/s available bandwidth (%s x%d link)\n", + pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n", bw_cap / 1000, bw_cap % 1000, PCIE_SPEED2STR(speed_cap), width_cap); else - pci_info(dev, "%u.%03u Gb/s available bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n", + pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n", bw_avail / 1000, bw_avail % 1000, PCIE_SPEED2STR(speed), width, limiting_dev ? pci_name(limiting_dev) : "<unknown>", diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index b1ae1618fefe..fee9225ca559 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -1622,22 +1622,30 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) if (!need_valid_mask) { irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0, - chip->ngpio, NUMA_NO_NODE); + community->npins, NUMA_NO_NODE); if (irq_base < 0) { dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n"); return irq_base; } - } else { - irq_base = 0; } - ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, irq_base, + ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0, handle_bad_irq, IRQ_TYPE_NONE); if (ret) { dev_err(pctrl->dev, "failed to add IRQ chip\n"); return ret; } + if (!need_valid_mask) { + for (i = 0; i < community->ngpio_ranges; i++) { + range = &community->gpio_ranges[i]; + + irq_domain_associate_many(chip->irq.domain, irq_base, + range->base, range->npins); + irq_base += range->npins; + } + } + gpiochip_set_chained_irqchip(chip, &chv_gpio_irqchip, irq, chv_gpio_irq_handler); return 0; diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c index 8870a4100164..fee3435a6f15 100644 --- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c +++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c @@ -36,6 +36,27 @@ .npins = ((e) - (s) + 1), \ } +#define SPTH_GPP(r, s, e, g) \ + { \ + .reg_num = (r), \ + .base = (s), \ + .size = ((e) - (s) + 1), \ + .gpio_base = (g), \ + } + +#define SPTH_COMMUNITY(b, s, e, g) \ + { \ + .barno = (b), \ + .padown_offset = SPT_PAD_OWN, \ + .padcfglock_offset = SPT_PADCFGLOCK, \ + .hostown_offset = SPT_HOSTSW_OWN, \ + .ie_offset = SPT_GPI_IE, \ + .pin_base = (s), \ + .npins = ((e) - (s) + 1), \ + .gpps = (g), \ + .ngpps = ARRAY_SIZE(g), \ + } + /* Sunrisepoint-LP */ static const struct 
pinctrl_pin_desc sptlp_pins[] = { /* GPP_A */ @@ -531,10 +552,28 @@ static const struct intel_function spth_functions[] = { FUNCTION("i2c2", spth_i2c2_groups), }; +static const struct intel_padgroup spth_community0_gpps[] = { + SPTH_GPP(0, 0, 23, 0), /* GPP_A */ + SPTH_GPP(1, 24, 47, 24), /* GPP_B */ +}; + +static const struct intel_padgroup spth_community1_gpps[] = { + SPTH_GPP(0, 48, 71, 48), /* GPP_C */ + SPTH_GPP(1, 72, 95, 72), /* GPP_D */ + SPTH_GPP(2, 96, 108, 96), /* GPP_E */ + SPTH_GPP(3, 109, 132, 120), /* GPP_F */ + SPTH_GPP(4, 133, 156, 144), /* GPP_G */ + SPTH_GPP(5, 157, 180, 168), /* GPP_H */ +}; + +static const struct intel_padgroup spth_community3_gpps[] = { + SPTH_GPP(0, 181, 191, 192), /* GPP_I */ +}; + static const struct intel_community spth_communities[] = { - SPT_COMMUNITY(0, 0, 47), - SPT_COMMUNITY(1, 48, 180), - SPT_COMMUNITY(2, 181, 191), + SPTH_COMMUNITY(0, 0, 47, spth_community0_gpps), + SPTH_COMMUNITY(1, 48, 180, spth_community1_gpps), + SPTH_COMMUNITY(2, 181, 191, spth_community3_gpps), }; static const struct intel_pinctrl_soc_data spth_soc_data = { diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg.c b/drivers/pinctrl/meson/pinctrl-meson-axg.c index 4b91ff74779b..99a6ceac8e53 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-axg.c +++ b/drivers/pinctrl/meson/pinctrl-meson-axg.c @@ -898,7 +898,7 @@ static struct meson_bank meson_axg_periphs_banks[] = { static struct meson_bank meson_axg_aobus_banks[] = { /* name first last irq pullen pull dir out in */ - BANK("AO", GPIOAO_0, GPIOAO_9, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0), + BANK("AO", GPIOAO_0, GPIOAO_13, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0), }; static struct meson_pmx_bank meson_axg_periphs_pmx_banks[] = { diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 39d06dd1f63a..bc309c5327ff 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -154,7 +154,7 @@ config DELL_LAPTOP depends on ACPI_VIDEO || ACPI_VIDEO = n depends on RFKILL || RFKILL = n depends on SERIO_I8042 - select DELL_SMBIOS + depends on DELL_SMBIOS select POWER_SUPPLY select LEDS_CLASS select NEW_LEDS diff --git a/drivers/platform/x86/asus-wireless.c b/drivers/platform/x86/asus-wireless.c index d4aeac3477f5..f086469ea740 100644 --- a/drivers/platform/x86/asus-wireless.c +++ b/drivers/platform/x86/asus-wireless.c @@ -178,8 +178,10 @@ static int asus_wireless_remove(struct acpi_device *adev) { struct asus_wireless_data *data = acpi_driver_data(adev); - if (data->wq) + if (data->wq) { + devm_led_classdev_unregister(&adev->dev, &data->led); destroy_workqueue(data->wq); + } return 0; } diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index 9d27016c899e..0434ab7b6497 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -740,10 +740,7 @@ static int do_dma_request(struct mport_dma_req *req, tx->callback = dma_xfer_callback; tx->callback_param = req; - req->dmach = chan; - req->sync = sync; req->status = DMA_IN_PROGRESS; - init_completion(&req->req_comp); kref_get(&req->refcount); cookie = dmaengine_submit(tx); @@ -831,13 +828,20 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode, if (!req) return -ENOMEM; - kref_init(&req->refcount); - ret = get_dma_channel(priv); if (ret) { kfree(req); return ret; } + chan = priv->dmach; + + kref_init(&req->refcount); + init_completion(&req->req_comp); + req->dir = dir; + req->filp = filp; + req->priv = priv; + req->dmach = chan; + req->sync = sync; /* * If 
parameter loc_addr != NULL, we are transferring data from/to @@ -925,11 +929,6 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode, xfer->offset, xfer->length); } - req->dir = dir; - req->filp = filp; - req->priv = priv; - chan = priv->dmach; - nents = dma_map_sg(chan->device->dev, req->sgt.sgl, req->sgt.nents, dir); if (nents == 0) { diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c index 8e70a627e0bb..cbbafdcaaecb 100644 --- a/drivers/remoteproc/qcom_q6v5_pil.c +++ b/drivers/remoteproc/qcom_q6v5_pil.c @@ -1083,6 +1083,7 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc) dev_err(qproc->dev, "unable to resolve mba region\n"); return ret; } + of_node_put(node); qproc->mba_phys = r.start; qproc->mba_size = resource_size(&r); @@ -1100,6 +1101,7 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc) dev_err(qproc->dev, "unable to resolve mpss region\n"); return ret; } + of_node_put(node); qproc->mpss_phys = qproc->mpss_reloc = r.start; qproc->mpss_size = resource_size(&r); diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 6d9c5832ce47..a9609d971f7f 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -1163,7 +1163,7 @@ int rproc_trigger_recovery(struct rproc *rproc) if (ret) return ret; - ret = rproc_stop(rproc, false); + ret = rproc_stop(rproc, true); if (ret) goto unlock_mutex; @@ -1316,7 +1316,7 @@ void rproc_shutdown(struct rproc *rproc) if (!atomic_dec_and_test(&rproc->power)) goto out; - ret = rproc_stop(rproc, true); + ret = rproc_stop(rproc, false); if (ret) { atomic_inc(&rproc->power); goto out; diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c index 64b6de9763ee..1efdf9ff8679 100644 --- a/drivers/rpmsg/rpmsg_char.c +++ b/drivers/rpmsg/rpmsg_char.c @@ -581,4 +581,6 @@ static void rpmsg_chrdev_exit(void) unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX); } module_exit(rpmsg_chrdev_exit); + +MODULE_ALIAS("rpmsg:rpmsg_chrdev"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c index 304e891e35fc..60f2250fd96b 100644 --- a/drivers/rtc/rtc-opal.c +++ b/drivers/rtc/rtc-opal.c @@ -57,7 +57,7 @@ static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms) static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm) { - long rc = OPAL_BUSY; + s64 rc = OPAL_BUSY; int retries = 10; u32 y_m_d; u64 h_m_s_ms; @@ -66,13 +66,17 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm) while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms); - if (rc == OPAL_BUSY_EVENT) + if (rc == OPAL_BUSY_EVENT) { + msleep(OPAL_BUSY_DELAY_MS); opal_poll_events(NULL); - else if (retries-- && (rc == OPAL_HARDWARE - || rc == OPAL_INTERNAL_ERROR)) - msleep(10); - else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT) - break; + } else if (rc == OPAL_BUSY) { + msleep(OPAL_BUSY_DELAY_MS); + } else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) { + if (retries--) { + msleep(10); /* Wait 10ms before retry */ + rc = OPAL_BUSY; /* go around again */ + } + } } if (rc != OPAL_SUCCESS) @@ -87,21 +91,26 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm) static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm) { - long rc = OPAL_BUSY; + s64 rc = OPAL_BUSY; int retries = 10; u32 y_m_d = 0; u64 h_m_s_ms = 0; tm_to_opal(tm, &y_m_d, &h_m_s_ms); + while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { rc = opal_rtc_write(y_m_d, 
h_m_s_ms); - if (rc == OPAL_BUSY_EVENT) + if (rc == OPAL_BUSY_EVENT) { + msleep(OPAL_BUSY_DELAY_MS); opal_poll_events(NULL); - else if (retries-- && (rc == OPAL_HARDWARE - || rc == OPAL_INTERNAL_ERROR)) - msleep(10); - else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT) - break; + } else if (rc == OPAL_BUSY) { + msleep(OPAL_BUSY_DELAY_MS); + } else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) { + if (retries--) { + msleep(10); /* Wait 10ms before retry */ + rc = OPAL_BUSY; /* go around again */ + } + } } return rc == OPAL_SUCCESS ? 0 : -EIO; diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c index 62f5f04d8f61..5e963fe0e38d 100644 --- a/drivers/s390/block/dasd_alias.c +++ b/drivers/s390/block/dasd_alias.c @@ -592,13 +592,22 @@ static int _schedule_lcu_update(struct alias_lcu *lcu, int dasd_alias_add_device(struct dasd_device *device) { struct dasd_eckd_private *private = device->private; - struct alias_lcu *lcu; + __u8 uaddr = private->uid.real_unit_addr; + struct alias_lcu *lcu = private->lcu; unsigned long flags; int rc; - lcu = private->lcu; rc = 0; spin_lock_irqsave(&lcu->lock, flags); + /* + * Check if device and lcu type differ. If so, the uac data may be + * outdated and needs to be updated. + */ + if (private->uid.type != lcu->uac->unit[uaddr].ua_type) { + lcu->flags |= UPDATE_PENDING; + DBF_DEV_EVENT(DBF_WARNING, device, "%s", + "uid type mismatch - trigger rescan"); + } if (!(lcu->flags & UPDATE_PENDING)) { rc = _add_device_to_lcu(lcu, device, device); if (rc) diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index f035c2f25d35..131f1989f6f3 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c @@ -27,7 +27,6 @@ #include <asm/io.h> #include <asm/irq.h> #include <asm/vtoc.h> -#include <asm/diag.h> #include "dasd_int.h" #include "dasd_diag.h" diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c index 5f8d9ea69ebd..eceba3858cef 100644 --- a/drivers/s390/char/sclp_early_core.c +++ b/drivers/s390/char/sclp_early_core.c @@ -18,7 +18,7 @@ int sclp_init_state __section(.data) = sclp_init_state_uninitialized; * Used to keep track of the size of the event masks. Qemu until version 2.11 * only supports 4 and needs a workaround. 
*/ -bool sclp_mask_compat_mode; +bool sclp_mask_compat_mode __section(.data); void sclp_early_wait_irq(void) { diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 6652a49a49b1..9029804dcd22 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -452,6 +452,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area) static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area) { + struct channel_path *chp; struct chp_link link; struct chp_id chpid; int status; @@ -464,10 +465,17 @@ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area) chpid.id = sei_area->rsid; /* allocate a new channel path structure, if needed */ status = chp_get_status(chpid); - if (status < 0) - chp_new(chpid); - else if (!status) + if (!status) return; + + if (status < 0) { + chp_new(chpid); + } else { + chp = chpid_to_chp(chpid); + mutex_lock(&chp->lock); + chp_update_desc(chp); + mutex_unlock(&chp->lock); + } memset(&link, 0, sizeof(struct chp_link)); link.chpid = chpid; if ((sei_area->vf & 0xc0) != 0) { diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c index ff6963ad6e39..3c800642134e 100644 --- a/drivers/s390/cio/vfio_ccw_fsm.c +++ b/drivers/s390/cio/vfio_ccw_fsm.c @@ -20,12 +20,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private) int ccode; __u8 lpm; unsigned long flags; + int ret; sch = private->sch; spin_lock_irqsave(sch->lock, flags); private->state = VFIO_CCW_STATE_BUSY; - spin_unlock_irqrestore(sch->lock, flags); orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm); @@ -38,10 +38,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private) * Initialize device status information */ sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND; - return 0; + ret = 0; + break; case 1: /* Status pending */ case 2: /* Busy */ - return -EBUSY; + ret = -EBUSY; + break; case 3: /* Device/path not operational */ { lpm = orb->cmd.lpm; @@ -51,13 +53,16 @@ static int fsm_io_helper(struct vfio_ccw_private *private) sch->lpm = 0; if (cio_update_schib(sch)) - return -ENODEV; - - return sch->lpm ? -EACCES : -ENODEV; + ret = -ENODEV; + else + ret = sch->lpm ? 
-EACCES : -ENODEV; + break; } default: - return ccode; + ret = ccode; } + spin_unlock_irqrestore(sch->lock, flags); + return ret; } static void fsm_notoper(struct vfio_ccw_private *private, diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 4326715dc13e..78b98b3e7efa 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -557,7 +557,6 @@ enum qeth_prot_versions { enum qeth_cmd_buffer_state { BUF_STATE_FREE, BUF_STATE_LOCKED, - BUF_STATE_PROCESSED, }; enum qeth_cq { @@ -601,7 +600,6 @@ struct qeth_channel { struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO]; atomic_t irq_pending; int io_buf_no; - int buf_no; }; /** diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 04fefa5bb08d..dffd820731f2 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -706,7 +706,6 @@ void qeth_clear_ipacmd_list(struct qeth_card *card) qeth_put_reply(reply); } spin_unlock_irqrestore(&card->lock, flags); - atomic_set(&card->write.irq_pending, 0); } EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list); @@ -818,7 +817,6 @@ void qeth_clear_cmd_buffers(struct qeth_channel *channel) for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) qeth_release_buffer(channel, &channel->iob[cnt]); - channel->buf_no = 0; channel->io_buf_no = 0; } EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers); @@ -924,7 +922,6 @@ static int qeth_setup_channel(struct qeth_channel *channel) kfree(channel->iob[cnt].data); return -ENOMEM; } - channel->buf_no = 0; channel->io_buf_no = 0; atomic_set(&channel->irq_pending, 0); spin_lock_init(&channel->iob_lock); @@ -1100,16 +1097,9 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, { int rc; int cstat, dstat; - struct qeth_cmd_buffer *buffer; + struct qeth_cmd_buffer *iob = NULL; struct qeth_channel *channel; struct qeth_card *card; - struct qeth_cmd_buffer *iob; - __u8 index; - - if (__qeth_check_irb_error(cdev, intparm, irb)) - return; - cstat = irb->scsw.cmd.cstat; - dstat = irb->scsw.cmd.dstat; card = CARD_FROM_CDEV(cdev); if (!card) @@ -1127,6 +1117,19 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, channel = &card->data; QETH_CARD_TEXT(card, 5, "data"); } + + if (qeth_intparm_is_iob(intparm)) + iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm); + + if (__qeth_check_irb_error(cdev, intparm, irb)) { + /* IO was terminated, free its resources. 
*/ + if (iob) + qeth_release_buffer(iob->channel, iob); + atomic_set(&channel->irq_pending, 0); + wake_up(&card->wait_q); + return; + } + atomic_set(&channel->irq_pending, 0); if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC)) @@ -1150,6 +1153,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, /* we don't have to handle this further */ intparm = 0; } + + cstat = irb->scsw.cmd.cstat; + dstat = irb->scsw.cmd.dstat; + if ((dstat & DEV_STAT_UNIT_EXCEP) || (dstat & DEV_STAT_UNIT_CHECK) || (cstat)) { @@ -1182,25 +1189,15 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, channel->state = CH_STATE_RCD_DONE; goto out; } - if (intparm) { - buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm); - buffer->state = BUF_STATE_PROCESSED; - } if (channel == &card->data) return; if (channel == &card->read && channel->state == CH_STATE_UP) __qeth_issue_next_read(card); - iob = channel->iob; - index = channel->buf_no; - while (iob[index].state == BUF_STATE_PROCESSED) { - if (iob[index].callback != NULL) - iob[index].callback(channel, iob + index); + if (iob && iob->callback) + iob->callback(iob->channel, iob); - index = (index + 1) % QETH_CMD_BUFFER_NO; - } - channel->buf_no = index; out: wake_up(&card->wait_q); return; @@ -1870,8 +1867,8 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel, atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); QETH_DBF_TEXT(SETUP, 6, "noirqpnd"); spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - rc = ccw_device_start(channel->ccwdev, - &channel->ccw, (addr_t) iob, 0, 0); + rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw, + (addr_t) iob, 0, 0, QETH_TIMEOUT); spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); if (rc) { @@ -1888,7 +1885,6 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel, if (channel->state != CH_STATE_UP) { rc = -ETIME; QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); - qeth_clear_cmd_buffers(channel); } else rc = 0; return rc; @@ -1942,8 +1938,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); QETH_DBF_TEXT(SETUP, 6, "noirqpnd"); spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - rc = ccw_device_start(channel->ccwdev, - &channel->ccw, (addr_t) iob, 0, 0); + rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw, + (addr_t) iob, 0, 0, QETH_TIMEOUT); spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); if (rc) { @@ -1964,7 +1960,6 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n", dev_name(&channel->ccwdev->dev)); QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME); - qeth_clear_cmd_buffers(channel); return -ETIME; } return qeth_idx_activate_get_answer(channel, idx_reply_cb); @@ -2166,8 +2161,8 @@ int qeth_send_control_data(struct qeth_card *card, int len, QETH_CARD_TEXT(card, 6, "noirqpnd"); spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); - rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, - (addr_t) iob, 0, 0); + rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw, + (addr_t) iob, 0, 0, event_timeout); spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); if (rc) { QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: " @@ -2199,8 +2194,6 @@ int qeth_send_control_data(struct qeth_card *card, int len, } } - if (reply->rc == -EIO) - goto error; rc = reply->rc; qeth_put_reply(reply); return rc; @@ -2211,10 +2204,6 @@ time_err: 
list_del_init(&reply->list); spin_unlock_irqrestore(&reply->card->lock, flags); atomic_inc(&reply->received); -error: - atomic_set(&card->write.irq_pending, 0); - qeth_release_buffer(iob->channel, iob); - card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO; rc = reply->rc; qeth_put_reply(reply); return rc; @@ -3033,28 +3022,23 @@ static int qeth_send_startlan(struct qeth_card *card) return rc; } -static int qeth_default_setadapterparms_cb(struct qeth_card *card, - struct qeth_reply *reply, unsigned long data) +static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd) { - struct qeth_ipa_cmd *cmd; - - QETH_CARD_TEXT(card, 4, "defadpcb"); - - cmd = (struct qeth_ipa_cmd *) data; - if (cmd->hdr.return_code == 0) + if (!cmd->hdr.return_code) cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code; - return 0; + return cmd->hdr.return_code; } static int qeth_query_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; QETH_CARD_TEXT(card, 3, "quyadpcb"); + if (qeth_setadpparms_inspect_rc(cmd)) + return 0; - cmd = (struct qeth_ipa_cmd *) data; if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) { card->info.link_type = cmd->data.setadapterparms.data.query_cmds_supp.lan_type; @@ -3062,7 +3046,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card, } card->options.adp.supported_funcs = cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds; - return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd); + return 0; } static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, @@ -3154,22 +3138,20 @@ EXPORT_SYMBOL_GPL(qeth_query_ipassists); static int qeth_query_switch_attributes_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; - struct qeth_switch_info *sw_info; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; struct qeth_query_switch_attributes *attrs; + struct qeth_switch_info *sw_info; QETH_CARD_TEXT(card, 2, "qswiatcb"); - cmd = (struct qeth_ipa_cmd *) data; - sw_info = (struct qeth_switch_info *)reply->param; - if (cmd->data.setadapterparms.hdr.return_code == 0) { - attrs = &cmd->data.setadapterparms.data.query_switch_attributes; - sw_info->capabilities = attrs->capabilities; - sw_info->settings = attrs->settings; - QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities, - sw_info->settings); - } - qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); + if (qeth_setadpparms_inspect_rc(cmd)) + return 0; + sw_info = (struct qeth_switch_info *)reply->param; + attrs = &cmd->data.setadapterparms.data.query_switch_attributes; + sw_info->capabilities = attrs->capabilities; + sw_info->settings = attrs->settings; + QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities, + sw_info->settings); return 0; } @@ -4207,16 +4189,13 @@ EXPORT_SYMBOL_GPL(qeth_do_send_packet); static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; struct qeth_ipacmd_setadpparms *setparms; QETH_CARD_TEXT(card, 4, "prmadpcb"); - cmd = (struct qeth_ipa_cmd *) data; setparms = &(cmd->data.setadapterparms); - - qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd); - if (cmd->hdr.return_code) { + if (qeth_setadpparms_inspect_rc(cmd)) { QETH_CARD_TEXT_(card, 4, "prmrc%x", 
cmd->hdr.return_code); setparms->data.mode = SET_PROMISC_MODE_OFF; } @@ -4286,18 +4265,18 @@ EXPORT_SYMBOL_GPL(qeth_get_stats); static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; QETH_CARD_TEXT(card, 4, "chgmaccb"); + if (qeth_setadpparms_inspect_rc(cmd)) + return 0; - cmd = (struct qeth_ipa_cmd *) data; if (!card->options.layer2 || !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) { ether_addr_copy(card->dev->dev_addr, cmd->data.setadapterparms.data.change_addr.addr); card->info.mac_bits |= QETH_LAYER2_MAC_READ; } - qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); return 0; } @@ -4328,13 +4307,15 @@ EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr); static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; struct qeth_set_access_ctrl *access_ctrl_req; int fallback = *(int *)reply->param; QETH_CARD_TEXT(card, 4, "setaccb"); + if (cmd->hdr.return_code) + return 0; + qeth_setadpparms_inspect_rc(cmd); - cmd = (struct qeth_ipa_cmd *) data; access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; QETH_DBF_TEXT_(SETUP, 2, "setaccb"); QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name); @@ -4407,7 +4388,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, card->options.isolation = card->options.prev_isolation; break; } - qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); return 0; } @@ -4695,14 +4675,15 @@ out: static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; struct qeth_qoat_priv *priv; char *resdata; int resdatalen; QETH_CARD_TEXT(card, 3, "qoatcb"); + if (qeth_setadpparms_inspect_rc(cmd)) + return 0; - cmd = (struct qeth_ipa_cmd *)data; priv = (struct qeth_qoat_priv *)reply->param; resdatalen = cmd->data.setadapterparms.hdr.cmdlength; resdata = (char *)data + 28; @@ -4796,21 +4777,18 @@ out: static int qeth_query_card_info_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct carrier_info *carrier_info = (struct carrier_info *)reply->param; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; struct qeth_query_card_info *card_info; - struct carrier_info *carrier_info; QETH_CARD_TEXT(card, 2, "qcrdincb"); - carrier_info = (struct carrier_info *)reply->param; - cmd = (struct qeth_ipa_cmd *)data; - card_info = &cmd->data.setadapterparms.data.card_info; - if (cmd->data.setadapterparms.hdr.return_code == 0) { - carrier_info->card_type = card_info->card_type; - carrier_info->port_mode = card_info->port_mode; - carrier_info->port_speed = card_info->port_speed; - } + if (qeth_setadpparms_inspect_rc(cmd)) + return 0; - qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); + card_info = &cmd->data.setadapterparms.data.card_info; + carrier_info->card_type = card_info->card_type; + carrier_info->port_mode = card_info->port_mode; + carrier_info->port_speed = card_info->port_speed; return 0; } @@ -4857,7 +4835,7 @@ int qeth_vm_request_mac(struct qeth_card *card) goto out; } - ccw_device_get_id(CARD_DDEV(card), &id); + ccw_device_get_id(CARD_RDEV(card), &id); request->resp_buf_len = sizeof(*response); 
request->resp_version = DIAG26C_VERSION2; request->op_code = DIAG26C_GET_MAC; @@ -6563,10 +6541,14 @@ static int __init qeth_core_init(void) mutex_init(&qeth_mod_mutex); qeth_wq = create_singlethread_workqueue("qeth_wq"); + if (!qeth_wq) { + rc = -ENOMEM; + goto out_err; + } rc = qeth_register_dbf_views(); if (rc) - goto out_err; + goto dbf_err; qeth_core_root_dev = root_device_register("qeth"); rc = PTR_ERR_OR_ZERO(qeth_core_root_dev); if (rc) @@ -6603,6 +6585,8 @@ slab_err: root_device_unregister(qeth_core_root_dev); register_err: qeth_unregister_dbf_views(); +dbf_err: + destroy_workqueue(qeth_wq); out_err: pr_err("Initializing the qeth device driver failed\n"); return rc; diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index 619f897b4bb0..f4d1ec0b8f5a 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -35,6 +35,18 @@ extern unsigned char IPA_PDU_HEADER[]; #define QETH_HALT_CHANNEL_PARM -11 #define QETH_RCD_PARM -12 +static inline bool qeth_intparm_is_iob(unsigned long intparm) +{ + switch (intparm) { + case QETH_CLEAR_CHANNEL_PARM: + case QETH_HALT_CHANNEL_PARM: + case QETH_RCD_PARM: + case 0: + return false; + } + return true; +} + /*****************************************************************************/ /* IP Assist related definitions */ /*****************************************************************************/ diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 50a313806dde..b8079f2a65b3 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -21,7 +21,6 @@ #include <linux/list.h> #include <linux/hash.h> #include <linux/hashtable.h> -#include <linux/string.h> #include <asm/setup.h> #include "qeth_core.h" #include "qeth_l2.h" @@ -122,13 +121,10 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) QETH_CARD_TEXT(card, 2, "L2Setmac"); rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC); if (rc == 0) { - card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; - ether_addr_copy(card->dev->dev_addr, mac); dev_info(&card->gdev->dev, - "MAC address %pM successfully registered on device %s\n", - card->dev->dev_addr, card->dev->name); + "MAC address %pM successfully registered on device %s\n", + mac, card->dev->name); } else { - card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; switch (rc) { case -EEXIST: dev_warn(&card->gdev->dev, @@ -143,19 +139,6 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) return rc; } -static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac) -{ - int rc; - - QETH_CARD_TEXT(card, 2, "L2Delmac"); - if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) - return 0; - rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC); - if (rc == 0) - card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; - return rc; -} - static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac) { enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ? 
@@ -520,6 +503,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) { struct sockaddr *addr = p; struct qeth_card *card = dev->ml_priv; + u8 old_addr[ETH_ALEN]; int rc = 0; QETH_CARD_TEXT(card, 3, "setmac"); @@ -531,14 +515,35 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) return -EOPNOTSUPP; } QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN); + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { QETH_CARD_TEXT(card, 3, "setmcREC"); return -ERESTARTSYS; } - rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]); - if (!rc || (rc == -ENOENT)) - rc = qeth_l2_send_setmac(card, addr->sa_data); - return rc ? -EINVAL : 0; + + if (!qeth_card_hw_is_reachable(card)) { + ether_addr_copy(dev->dev_addr, addr->sa_data); + return 0; + } + + /* don't register the same address twice */ + if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) && + (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) + return 0; + + /* add the new address, switch over, drop the old */ + rc = qeth_l2_send_setmac(card, addr->sa_data); + if (rc) + return rc; + ether_addr_copy(old_addr, dev->dev_addr); + ether_addr_copy(dev->dev_addr, addr->sa_data); + + if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED) + qeth_l2_remove_mac(card, old_addr); + card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; + return 0; } static void qeth_promisc_to_bridge(struct qeth_card *card) @@ -1068,8 +1073,9 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) goto out_remove; } - if (card->info.type != QETH_CARD_TYPE_OSN) - qeth_l2_send_setmac(card, &card->dev->dev_addr[0]); + if (card->info.type != QETH_CARD_TYPE_OSN && + !qeth_l2_send_setmac(card, card->dev->dev_addr)) + card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) { if (card->info.hwtrap && @@ -1339,8 +1345,8 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len, qeth_prepare_control_data(card, len, iob); QETH_CARD_TEXT(card, 6, "osnoirqp"); spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); - rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, - (addr_t) iob, 0, 0); + rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw, + (addr_t) iob, 0, 0, QETH_IPA_TIMEOUT); spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); if (rc) { QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: " diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c index 3b0c8b8a7634..066b5c3aaae6 100644 --- a/drivers/s390/net/smsgiucv.c +++ b/drivers/s390/net/smsgiucv.c @@ -176,7 +176,7 @@ static struct device_driver smsg_driver = { static void __exit smsg_exit(void) { - cpcmd("SET SMSG IUCV", NULL, 0, NULL); + cpcmd("SET SMSG OFF", NULL, 0, NULL); device_unregister(smsg_dev); iucv_unregister(&smsg_handler, 1); driver_unregister(&smsg_driver); diff --git a/drivers/sbus/char/oradax.c b/drivers/sbus/char/oradax.c index c44d7c7ffc92..1754f55e2fac 100644 --- a/drivers/sbus/char/oradax.c +++ b/drivers/sbus/char/oradax.c @@ -3,7 +3,7 @@ * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or + * the Free Software Foundation, either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c index 884419c37e84..457ea1f8db30 100644 --- a/drivers/slimbus/messaging.c +++ b/drivers/slimbus/messaging.c @@ -183,7 +183,7 @@ static u16 slim_slicesize(int code) 0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7 }; - clamp(code, 1, (int)ARRAY_SIZE(sizetocode)); + code = clamp(code, 1, (int)ARRAY_SIZE(sizetocode)); return sizetocode[code - 1]; } diff --git a/drivers/soc/bcm/raspberrypi-power.c b/drivers/soc/bcm/raspberrypi-power.c index fe96a8b956fb..f7ed1187518b 100644 --- a/drivers/soc/bcm/raspberrypi-power.c +++ b/drivers/soc/bcm/raspberrypi-power.c @@ -45,7 +45,7 @@ struct rpi_power_domains { struct rpi_power_domain_packet { u32 domain; u32 on; -} __packet; +}; /* * Asks the firmware to enable or disable power on a specific power diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c index 16cab40156ca..aeab05f682d9 100644 --- a/drivers/staging/media/imx/imx-media-csi.c +++ b/drivers/staging/media/imx/imx-media-csi.c @@ -1799,7 +1799,7 @@ static int imx_csi_probe(struct platform_device *pdev) priv->dev->of_node = pdata->of_node; pinctrl = devm_pinctrl_get_select_default(priv->dev); if (IS_ERR(pinctrl)) { - ret = PTR_ERR(priv->vdev); + ret = PTR_ERR(pinctrl); dev_dbg(priv->dev, "devm_pinctrl_get_select_default() failed: %d\n", ret); if (ret != -ENODEV) diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c index 6b5300ca44a6..885f5fcead77 100644 --- a/drivers/staging/wilc1000/host_interface.c +++ b/drivers/staging/wilc1000/host_interface.c @@ -1390,7 +1390,7 @@ static inline void host_int_parse_assoc_resp_info(struct wilc_vif *vif, } if (hif_drv->usr_conn_req.ies) { - conn_info.req_ies = kmemdup(conn_info.req_ies, + conn_info.req_ies = kmemdup(hif_drv->usr_conn_req.ies, hif_drv->usr_conn_req.ies_len, GFP_KERNEL); if (conn_info.req_ies) diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c b/drivers/thermal/int340x_thermal/int3403_thermal.c index 8a7f24dd9315..0c19fcd56a0d 100644 --- a/drivers/thermal/int340x_thermal/int3403_thermal.c +++ b/drivers/thermal/int340x_thermal/int3403_thermal.c @@ -194,6 +194,7 @@ static int int3403_cdev_add(struct int3403_priv *priv) return -EFAULT; } + priv->priv = obj; obj->max_state = p->package.count - 1; obj->cdev = thermal_cooling_device_register(acpi_device_bid(priv->adev), @@ -201,8 +202,6 @@ static int int3403_cdev_add(struct int3403_priv *priv) if (IS_ERR(obj->cdev)) result = PTR_ERR(obj->cdev); - priv->priv = obj; - kfree(buf.pointer); /* TODO: add ACPI notification support */ diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index ed805c7c5ace..ac83f721db24 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -185,6 +185,7 @@ * @regulator: pointer to the TMU regulator structure. * @reg_conf: pointer to structure to register with core thermal. * @ntrip: number of supported trip points. 
+ * @enabled: current status of TMU device * @tmu_initialize: SoC specific TMU initialization method * @tmu_control: SoC specific TMU control method * @tmu_read: SoC specific TMU temperature read method @@ -205,6 +206,7 @@ struct exynos_tmu_data { struct regulator *regulator; struct thermal_zone_device *tzd; unsigned int ntrip; + bool enabled; int (*tmu_initialize)(struct platform_device *pdev); void (*tmu_control)(struct platform_device *pdev, bool on); @@ -398,6 +400,7 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on) mutex_lock(&data->lock); clk_enable(data->clk); data->tmu_control(pdev, on); + data->enabled = on; clk_disable(data->clk); mutex_unlock(&data->lock); } @@ -889,19 +892,24 @@ static void exynos7_tmu_control(struct platform_device *pdev, bool on) static int exynos_get_temp(void *p, int *temp) { struct exynos_tmu_data *data = p; + int value, ret = 0; - if (!data || !data->tmu_read) + if (!data || !data->tmu_read || !data->enabled) return -EINVAL; mutex_lock(&data->lock); clk_enable(data->clk); - *temp = code_to_temp(data, data->tmu_read(data)) * MCELSIUS; + value = data->tmu_read(data); + if (value < 0) + ret = value; + else + *temp = code_to_temp(data, value) * MCELSIUS; clk_disable(data->clk); mutex_unlock(&data->lock); - return 0; + return ret; } #ifdef CONFIG_THERMAL_EMULATION diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 3b3e1f6632d7..1dbe27c9946c 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -121,6 +121,9 @@ struct gsm_dlci { struct mutex mutex; /* Link layer */ + int mode; +#define DLCI_MODE_ABM 0 /* Normal Asynchronous Balanced Mode */ +#define DLCI_MODE_ADM 1 /* Asynchronous Disconnected Mode */ spinlock_t lock; /* Protects the internal state */ struct timer_list t1; /* Retransmit timer for SABM and UA */ int retries; @@ -1364,7 +1367,13 @@ retry: ctrl->data = data; ctrl->len = clen; gsm->pending_cmd = ctrl; - gsm->cretries = gsm->n2; + + /* If DLCI0 is in ADM mode skip retries, it won't respond */ + if (gsm->dlci[0]->mode == DLCI_MODE_ADM) + gsm->cretries = 1; + else + gsm->cretries = gsm->n2; + mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100); gsm_control_transmit(gsm, ctrl); spin_unlock_irqrestore(&gsm->control_lock, flags); @@ -1472,6 +1481,7 @@ static void gsm_dlci_t1(struct timer_list *t) if (debug & 8) pr_info("DLCI %d opening in ADM mode.\n", dlci->addr); + dlci->mode = DLCI_MODE_ADM; gsm_dlci_open(dlci); } else { gsm_dlci_close(dlci); @@ -2861,11 +2871,22 @@ static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk) static int gsm_carrier_raised(struct tty_port *port) { struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port); + struct gsm_mux *gsm = dlci->gsm; + /* Not yet open so no carrier info */ if (dlci->state != DLCI_OPEN) return 0; if (debug & 2) return 1; + + /* + * Basic mode with control channel in ADM mode may not respond + * to CMD_MSC at all and modem_rx is empty. 
+ */ + if (gsm->encoding == 0 && gsm->dlci[0]->mode == DLCI_MODE_ADM && + !dlci->modem_rx) + return 1; + return dlci->modem_rx & TIOCM_CD; } diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c index a24278380fec..22683393a0f2 100644 --- a/drivers/tty/serial/earlycon.c +++ b/drivers/tty/serial/earlycon.c @@ -169,7 +169,7 @@ static int __init register_earlycon(char *buf, const struct earlycon_id *match) */ int __init setup_earlycon(char *buf) { - const struct earlycon_id *match; + const struct earlycon_id **p_match; if (!buf || !buf[0]) return -EINVAL; @@ -177,7 +177,9 @@ int __init setup_earlycon(char *buf) if (early_con.flags & CON_ENABLED) return -EALREADY; - for (match = __earlycon_table; match < __earlycon_table_end; match++) { + for (p_match = __earlycon_table; p_match < __earlycon_table_end; + p_match++) { + const struct earlycon_id *match = *p_match; size_t len = strlen(match->name); if (strncmp(buf, match->name, len)) diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 91f3a1a5cb7f..c2fc6bef7a6f 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -316,7 +316,7 @@ static u32 imx_uart_readl(struct imx_port *sport, u32 offset) * differ from the value that was last written. As it only * clears after being set, reread conditionally. */ - if (sport->ucr2 & UCR2_SRST) + if (!(sport->ucr2 & UCR2_SRST)) sport->ucr2 = readl(sport->port.membase + offset); return sport->ucr2; break; @@ -1833,6 +1833,11 @@ static int imx_uart_rs485_config(struct uart_port *port, rs485conf->flags &= ~SER_RS485_ENABLED; if (rs485conf->flags & SER_RS485_ENABLED) { + /* Enable receiver if low-active RTS signal is requested */ + if (sport->have_rtscts && !sport->have_rtsgpio && + !(rs485conf->flags & SER_RS485_RTS_ON_SEND)) + rs485conf->flags |= SER_RS485_RX_DURING_TX; + /* disable transmitter */ ucr2 = imx_uart_readl(sport, UCR2); if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND) @@ -2265,6 +2270,18 @@ static int imx_uart_probe(struct platform_device *pdev) (!sport->have_rtscts && !sport->have_rtsgpio)) dev_err(&pdev->dev, "no RTS control, disabling rs485\n"); + /* + * If using the i.MX UART RTS/CTS control then the RTS (CTS_B) + * signal cannot be set low during transmission in case the + * receiver is off (limitation of the i.MX UART IP). 
+ */ + if (sport->port.rs485.flags & SER_RS485_ENABLED && + sport->have_rtscts && !sport->have_rtsgpio && + (!(sport->port.rs485.flags & SER_RS485_RTS_ON_SEND) && + !(sport->port.rs485.flags & SER_RS485_RX_DURING_TX))) + dev_err(&pdev->dev, + "low-active RTS not possible when receiver is off, enabling receiver\n"); + imx_uart_rs485_config(&sport->port, &sport->port.rs485); /* Disable interrupts before requesting them */ diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c index 750e5645dc85..f503fab1e268 100644 --- a/drivers/tty/serial/mvebu-uart.c +++ b/drivers/tty/serial/mvebu-uart.c @@ -495,7 +495,6 @@ static void mvebu_uart_set_termios(struct uart_port *port, termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR); termios->c_cflag &= CREAD | CBAUD; termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD); - termios->c_lflag = old->c_lflag; } spin_unlock_irqrestore(&port->lock, flags); diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 65ff669373d4..a1b3eb04cb32 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -1022,6 +1022,7 @@ static int qcom_geni_serial_probe(struct platform_device *pdev) struct qcom_geni_serial_port *port; struct uart_port *uport; struct resource *res; + int irq; if (pdev->dev.of_node) line = of_alias_get_id(pdev->dev.of_node, "serial"); @@ -1061,11 +1062,12 @@ static int qcom_geni_serial_probe(struct platform_device *pdev) port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS; port->tx_fifo_width = DEF_FIFO_WIDTH_BITS; - uport->irq = platform_get_irq(pdev, 0); - if (uport->irq < 0) { - dev_err(&pdev->dev, "Failed to get IRQ %d\n", uport->irq); - return uport->irq; + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "Failed to get IRQ %d\n", irq); + return irq; } + uport->irq = irq; uport->private_data = &qcom_geni_console_driver; platform_set_drvdata(pdev, port); diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index abcb4d09a2d8..bd72dd843338 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -1181,7 +1181,7 @@ static int __init cdns_early_console_setup(struct earlycon_device *device, /* only set baud if specified on command line - otherwise * assume it has been initialized by a boot loader. */ - if (device->baud) { + if (port->uartclk && device->baud) { u32 cd = 0, bdiv = 0; u32 mr; int div8; diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 63114ea35ec1..7c838b90a31d 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -2816,7 +2816,10 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx) kref_init(&tty->kref); tty->magic = TTY_MAGIC; - tty_ldisc_init(tty); + if (tty_ldisc_init(tty)) { + kfree(tty); + return NULL; + } tty->session = NULL; tty->pgrp = NULL; mutex_init(&tty->legacy_mutex); diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index 050f4d650891..fb7329ab2b37 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c @@ -176,12 +176,11 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc) return ERR_CAST(ldops); } - ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL); - if (ld == NULL) { - put_ldops(ldops); - return ERR_PTR(-ENOMEM); - } - + /* + * There is no way to handle allocation failure of only 16 bytes. + * Let's simplify error handling and save more memory. 
+ */ + ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL | __GFP_NOFAIL); ld->ops = ldops; ld->tty = tty; @@ -527,19 +526,16 @@ static int tty_ldisc_failto(struct tty_struct *tty, int ld) static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old) { /* There is an outstanding reference here so this is safe */ - old = tty_ldisc_get(tty, old->ops->num); - WARN_ON(IS_ERR(old)); - tty->ldisc = old; - tty_set_termios_ldisc(tty, old->ops->num); - if (tty_ldisc_open(tty, old) < 0) { - tty_ldisc_put(old); + if (tty_ldisc_failto(tty, old->ops->num) < 0) { + const char *name = tty_name(tty); + + pr_warn("Falling back ldisc for %s.\n", name); /* The traditional behaviour is to fall back to N_TTY, we want to avoid falling back to N_NULL unless we have no choice to avoid the risk of breaking anything */ if (tty_ldisc_failto(tty, N_TTY) < 0 && tty_ldisc_failto(tty, N_NULL) < 0) - panic("Couldn't open N_NULL ldisc for %s.", - tty_name(tty)); + panic("Couldn't open N_NULL ldisc for %s.", name); } } @@ -824,12 +820,13 @@ EXPORT_SYMBOL_GPL(tty_ldisc_release); * the tty structure is not completely set up when this call is made. */ -void tty_ldisc_init(struct tty_struct *tty) +int tty_ldisc_init(struct tty_struct *tty) { struct tty_ldisc *ld = tty_ldisc_get(tty, N_TTY); if (IS_ERR(ld)) - panic("n_tty: init_tty"); + return PTR_ERR(ld); tty->ldisc = ld; + return 0; } /** diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c index f695a7e8c314..c690d100adcd 100644 --- a/drivers/uio/uio_hv_generic.c +++ b/drivers/uio/uio_hv_generic.c @@ -19,7 +19,7 @@ * # echo -n "ed963694-e847-4b2a-85af-bc9cfc11d6f3" \ * > /sys/bus/vmbus/drivers/uio_hv_generic/bind */ - +#define DEBUG 1 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/device.h> @@ -94,10 +94,11 @@ hv_uio_irqcontrol(struct uio_info *info, s32 irq_state) */ static void hv_uio_channel_cb(void *context) { - struct hv_uio_private_data *pdata = context; - struct hv_device *dev = pdata->device; + struct vmbus_channel *chan = context; + struct hv_device *hv_dev = chan->device_obj; + struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev); - dev->channel->inbound.ring_buffer->interrupt_mask = 1; + chan->inbound.ring_buffer->interrupt_mask = 1; virt_mb(); uio_event_notify(&pdata->info); @@ -121,78 +122,46 @@ static void hv_uio_rescind(struct vmbus_channel *channel) uio_event_notify(&pdata->info); } -/* - * Handle fault when looking for sub channel ring buffer - * Subchannel ring buffer is same as resource 0 which is main ring buffer - * This is derived from uio_vma_fault +/* Sysfs API to allow mmap of the ring buffers + * The ring buffer is allocated as contiguous memory by vmbus_open */ -static int hv_uio_vma_fault(struct vm_fault *vmf) -{ - struct vm_area_struct *vma = vmf->vma; - void *ring_buffer = vma->vm_private_data; - struct page *page; - void *addr; - - addr = ring_buffer + (vmf->pgoff << PAGE_SHIFT); - page = virt_to_page(addr); - get_page(page); - vmf->page = page; - return 0; -} - -static const struct vm_operations_struct hv_uio_vm_ops = { - .fault = hv_uio_vma_fault, -}; - -/* Sysfs API to allow mmap of the ring buffers */ static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, struct vm_area_struct *vma) { struct vmbus_channel *channel = container_of(kobj, struct vmbus_channel, kobj); - unsigned long requested_pages, actual_pages; - - if (vma->vm_end < vma->vm_start) - return -EINVAL; - - /* only allow 0 for now */ - if (vma->vm_pgoff > 0) - return -EINVAL; + struct 
hv_device *dev = channel->primary_channel->device_obj; + u16 q_idx = channel->offermsg.offer.sub_channel_index; - requested_pages = vma_pages(vma); - actual_pages = 2 * HV_RING_SIZE; - if (requested_pages > actual_pages) - return -EINVAL; + dev_dbg(&dev->device, "mmap channel %u pages %#lx at %#lx\n", + q_idx, vma_pages(vma), vma->vm_pgoff); - vma->vm_private_data = channel->ringbuffer_pages; - vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; - vma->vm_ops = &hv_uio_vm_ops; - return 0; + return vm_iomap_memory(vma, virt_to_phys(channel->ringbuffer_pages), + channel->ringbuffer_pagecount << PAGE_SHIFT); } -static struct bin_attribute ring_buffer_bin_attr __ro_after_init = { +static const struct bin_attribute ring_buffer_bin_attr = { .attr = { .name = "ring", .mode = 0600, - /* size is set at init time */ }, + .size = 2 * HV_RING_SIZE * PAGE_SIZE, .mmap = hv_uio_ring_mmap, }; -/* Callback from VMBUS subystem when new channel created. */ +/* Callback from VMBUS subsystem when new channel created. */ static void hv_uio_new_channel(struct vmbus_channel *new_sc) { struct hv_device *hv_dev = new_sc->primary_channel->device_obj; struct device *device = &hv_dev->device; - struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev); const size_t ring_bytes = HV_RING_SIZE * PAGE_SIZE; int ret; /* Create host communication ring */ ret = vmbus_open(new_sc, ring_bytes, ring_bytes, NULL, 0, - hv_uio_channel_cb, pdata); + hv_uio_channel_cb, new_sc); if (ret) { dev_err(device, "vmbus_open subchannel failed: %d\n", ret); return; @@ -234,7 +203,7 @@ hv_uio_probe(struct hv_device *dev, ret = vmbus_open(dev->channel, HV_RING_SIZE * PAGE_SIZE, HV_RING_SIZE * PAGE_SIZE, NULL, 0, - hv_uio_channel_cb, pdata); + hv_uio_channel_cb, dev->channel); if (ret) goto fail; @@ -326,6 +295,11 @@ hv_uio_probe(struct hv_device *dev, vmbus_set_chn_rescind_callback(dev->channel, hv_uio_rescind); vmbus_set_sc_create_callback(dev->channel, hv_uio_new_channel); + ret = sysfs_create_bin_file(&dev->channel->kobj, &ring_buffer_bin_attr); + if (ret) + dev_notice(&dev->device, + "sysfs create ring bin file failed; %d\n", ret); + hv_set_drvdata(dev, pdata); return 0; diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index 75f7fb151f71..987fc5ba6321 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig @@ -207,5 +207,6 @@ config USB_ULPI_BUS config USB_ROLE_SWITCH tristate + select USB_COMMON endif # USB_SUPPORT diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index c821b4b9647e..7b5cb28ffb35 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -191,7 +191,9 @@ static const unsigned short full_speed_maxpacket_maxes[4] = { static const unsigned short high_speed_maxpacket_maxes[4] = { [USB_ENDPOINT_XFER_CONTROL] = 64, [USB_ENDPOINT_XFER_ISOC] = 1024, - [USB_ENDPOINT_XFER_BULK] = 512, + + /* Bulk should be 512, but some devices use 1024: we will warn below */ + [USB_ENDPOINT_XFER_BULK] = 1024, [USB_ENDPOINT_XFER_INT] = 1024, }; static const unsigned short super_speed_maxpacket_maxes[4] = { diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 777036ae6367..0a42c5df3c0f 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -2262,7 +2262,8 @@ int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg) hcd->state = HC_STATE_SUSPENDED; if (!PMSG_IS_AUTO(msg)) - usb_phy_roothub_power_off(hcd->phy_roothub); + usb_phy_roothub_suspend(hcd->self.sysdev, + hcd->phy_roothub); /* Did we race with a root-hub wakeup event? 
*/ if (rhdev->do_remote_wakeup) { @@ -2302,7 +2303,8 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg) } if (!PMSG_IS_AUTO(msg)) { - status = usb_phy_roothub_power_on(hcd->phy_roothub); + status = usb_phy_roothub_resume(hcd->self.sysdev, + hcd->phy_roothub); if (status) return status; } @@ -2344,7 +2346,7 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg) } } else { hcd->state = old_state; - usb_phy_roothub_power_off(hcd->phy_roothub); + usb_phy_roothub_suspend(hcd->self.sysdev, hcd->phy_roothub); dev_dbg(&rhdev->dev, "bus %s fail, err %d\n", "resume", status); if (status != -ESHUTDOWN) @@ -2377,6 +2379,7 @@ void usb_hcd_resume_root_hub (struct usb_hcd *hcd) spin_lock_irqsave (&hcd_root_hub_lock, flags); if (hcd->rh_registered) { + pm_wakeup_event(&hcd->self.root_hub->dev, 0); set_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags); queue_work(pm_wq, &hcd->wakeup_work); } @@ -2758,12 +2761,16 @@ int usb_add_hcd(struct usb_hcd *hcd, } if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) { - hcd->phy_roothub = usb_phy_roothub_init(hcd->self.sysdev); + hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev); if (IS_ERR(hcd->phy_roothub)) { retval = PTR_ERR(hcd->phy_roothub); - goto err_phy_roothub_init; + goto err_phy_roothub_alloc; } + retval = usb_phy_roothub_init(hcd->phy_roothub); + if (retval) + goto err_phy_roothub_alloc; + retval = usb_phy_roothub_power_on(hcd->phy_roothub); if (retval) goto err_usb_phy_roothub_power_on; @@ -2936,7 +2943,7 @@ err_create_buf: usb_phy_roothub_power_off(hcd->phy_roothub); err_usb_phy_roothub_power_on: usb_phy_roothub_exit(hcd->phy_roothub); -err_phy_roothub_init: +err_phy_roothub_alloc: if (hcd->remove_phy && hcd->usb_phy) { usb_phy_shutdown(hcd->usb_phy); usb_put_phy(hcd->usb_phy); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index f6ea16e9f6bb..aa9968d90a48 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -653,12 +653,17 @@ void usb_wakeup_notification(struct usb_device *hdev, unsigned int portnum) { struct usb_hub *hub; + struct usb_port *port_dev; if (!hdev) return; hub = usb_hub_to_struct_hub(hdev); if (hub) { + port_dev = hub->ports[portnum - 1]; + if (port_dev && port_dev->child) + pm_wakeup_event(&port_dev->child->dev, 0); + set_bit(portnum, hub->wakeup_bits); kick_hub_wq(hub); } @@ -3434,8 +3439,11 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg) /* Skip the initial Clear-Suspend step for a remote wakeup */ status = hub_port_status(hub, port1, &portstatus, &portchange); - if (status == 0 && !port_is_suspended(hub, portstatus)) + if (status == 0 && !port_is_suspended(hub, portstatus)) { + if (portchange & USB_PORT_STAT_C_SUSPEND) + pm_wakeup_event(&udev->dev, 0); goto SuspendCleared; + } /* see 7.1.7.7; affects power usage, but not budgeting */ if (hub_is_superspeed(hub->hdev)) diff --git a/drivers/usb/core/phy.c b/drivers/usb/core/phy.c index 09b7c43c0ea4..9879767452a2 100644 --- a/drivers/usb/core/phy.c +++ b/drivers/usb/core/phy.c @@ -19,19 +19,6 @@ struct usb_phy_roothub { struct list_head list; }; -static struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev) -{ - struct usb_phy_roothub *roothub_entry; - - roothub_entry = devm_kzalloc(dev, sizeof(*roothub_entry), GFP_KERNEL); - if (!roothub_entry) - return ERR_PTR(-ENOMEM); - - INIT_LIST_HEAD(&roothub_entry->list); - - return roothub_entry; -} - static int usb_phy_roothub_add_phy(struct device *dev, int index, struct list_head *list) { @@ -45,9 +32,11 @@ static int 
usb_phy_roothub_add_phy(struct device *dev, int index, return PTR_ERR(phy); } - roothub_entry = usb_phy_roothub_alloc(dev); - if (IS_ERR(roothub_entry)) - return PTR_ERR(roothub_entry); + roothub_entry = devm_kzalloc(dev, sizeof(*roothub_entry), GFP_KERNEL); + if (!roothub_entry) + return -ENOMEM; + + INIT_LIST_HEAD(&roothub_entry->list); roothub_entry->phy = phy; @@ -56,28 +45,44 @@ static int usb_phy_roothub_add_phy(struct device *dev, int index, return 0; } -struct usb_phy_roothub *usb_phy_roothub_init(struct device *dev) +struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev) { struct usb_phy_roothub *phy_roothub; - struct usb_phy_roothub *roothub_entry; - struct list_head *head; int i, num_phys, err; + if (!IS_ENABLED(CONFIG_GENERIC_PHY)) + return NULL; + num_phys = of_count_phandle_with_args(dev->of_node, "phys", "#phy-cells"); if (num_phys <= 0) return NULL; - phy_roothub = usb_phy_roothub_alloc(dev); - if (IS_ERR(phy_roothub)) - return phy_roothub; + phy_roothub = devm_kzalloc(dev, sizeof(*phy_roothub), GFP_KERNEL); + if (!phy_roothub) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&phy_roothub->list); for (i = 0; i < num_phys; i++) { err = usb_phy_roothub_add_phy(dev, i, &phy_roothub->list); if (err) - goto err_out; + return ERR_PTR(err); } + return phy_roothub; +} +EXPORT_SYMBOL_GPL(usb_phy_roothub_alloc); + +int usb_phy_roothub_init(struct usb_phy_roothub *phy_roothub) +{ + struct usb_phy_roothub *roothub_entry; + struct list_head *head; + int err; + + if (!phy_roothub) + return 0; + head = &phy_roothub->list; list_for_each_entry(roothub_entry, head, list) { @@ -86,14 +91,13 @@ struct usb_phy_roothub *usb_phy_roothub_init(struct device *dev) goto err_exit_phys; } - return phy_roothub; + return 0; err_exit_phys: list_for_each_entry_continue_reverse(roothub_entry, head, list) phy_exit(roothub_entry->phy); -err_out: - return ERR_PTR(err); + return err; } EXPORT_SYMBOL_GPL(usb_phy_roothub_init); @@ -111,7 +115,7 @@ int usb_phy_roothub_exit(struct usb_phy_roothub *phy_roothub) list_for_each_entry(roothub_entry, head, list) { err = phy_exit(roothub_entry->phy); if (err) - ret = ret; + ret = err; } return ret; @@ -156,3 +160,38 @@ void usb_phy_roothub_power_off(struct usb_phy_roothub *phy_roothub) phy_power_off(roothub_entry->phy); } EXPORT_SYMBOL_GPL(usb_phy_roothub_power_off); + +int usb_phy_roothub_suspend(struct device *controller_dev, + struct usb_phy_roothub *phy_roothub) +{ + usb_phy_roothub_power_off(phy_roothub); + + /* keep the PHYs initialized so the device can wake up the system */ + if (device_may_wakeup(controller_dev)) + return 0; + + return usb_phy_roothub_exit(phy_roothub); +} +EXPORT_SYMBOL_GPL(usb_phy_roothub_suspend); + +int usb_phy_roothub_resume(struct device *controller_dev, + struct usb_phy_roothub *phy_roothub) +{ + int err; + + /* if the device can't wake up the system _exit was called */ + if (!device_may_wakeup(controller_dev)) { + err = usb_phy_roothub_init(phy_roothub); + if (err) + return err; + } + + err = usb_phy_roothub_power_on(phy_roothub); + + /* undo _init if _power_on failed */ + if (err && !device_may_wakeup(controller_dev)) + usb_phy_roothub_exit(phy_roothub); + + return err; +} +EXPORT_SYMBOL_GPL(usb_phy_roothub_resume); diff --git a/drivers/usb/core/phy.h b/drivers/usb/core/phy.h index 6fde59bfbff8..88a3c037e9df 100644 --- a/drivers/usb/core/phy.h +++ b/drivers/usb/core/phy.h @@ -1,7 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * USB roothub wrapper + * + * Copyright (C) 2018 Martin Blumenstingl 
<martin.blumenstingl@googlemail.com> + */ + +#ifndef __USB_CORE_PHY_H_ +#define __USB_CORE_PHY_H_ + +struct device; struct usb_phy_roothub; -struct usb_phy_roothub *usb_phy_roothub_init(struct device *dev); +struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev); + +int usb_phy_roothub_init(struct usb_phy_roothub *phy_roothub); int usb_phy_roothub_exit(struct usb_phy_roothub *phy_roothub); int usb_phy_roothub_power_on(struct usb_phy_roothub *phy_roothub); void usb_phy_roothub_power_off(struct usb_phy_roothub *phy_roothub); + +int usb_phy_roothub_suspend(struct device *controller_dev, + struct usb_phy_roothub *phy_roothub); +int usb_phy_roothub_resume(struct device *controller_dev, + struct usb_phy_roothub *phy_roothub); + +#endif /* __USB_CORE_PHY_H_ */ diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 920f48a49a87..c55def2f1320 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -186,6 +186,9 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x03f0, 0x0701), .driver_info = USB_QUIRK_STRING_FETCH_255 }, + /* HP v222w 16GB Mini USB Drive */ + { USB_DEVICE(0x03f0, 0x3f40), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Creative SB Audigy 2 NX */ { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index d83be5651f87..a666e0758a99 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -985,6 +985,7 @@ struct dwc2_hsotg { /* DWC OTG HW Release versions */ #define DWC2_CORE_REV_2_71a 0x4f54271a +#define DWC2_CORE_REV_2_72a 0x4f54272a #define DWC2_CORE_REV_2_80a 0x4f54280a #define DWC2_CORE_REV_2_90a 0x4f54290a #define DWC2_CORE_REV_2_91a 0x4f54291a @@ -992,6 +993,7 @@ struct dwc2_hsotg { #define DWC2_CORE_REV_2_94a 0x4f54294a #define DWC2_CORE_REV_3_00a 0x4f54300a #define DWC2_CORE_REV_3_10a 0x4f54310a +#define DWC2_CORE_REV_4_00a 0x4f54400a #define DWC2_FS_IOT_REV_1_00a 0x5531100a #define DWC2_HS_IOT_REV_1_00a 0x5532100a diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 6c32bf26e48e..83cb5577a52f 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -3928,6 +3928,27 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep, if (index && !hs_ep->isochronous) epctrl |= DXEPCTL_SETD0PID; + /* WA for Full speed ISOC IN in DDMA mode. + * By Clear NAK status of EP, core will send ZLP + * to IN token and assert NAK interrupt relying + * on TxFIFO status only + */ + + if (hsotg->gadget.speed == USB_SPEED_FULL && + hs_ep->isochronous && dir_in) { + /* The WA applies only to core versions from 2.72a + * to 4.00a (including both). Also for FS_IOT_1.00a + * and HS_IOT_1.00a. 
+ */ + u32 gsnpsid = dwc2_readl(hsotg->regs + GSNPSID); + + if ((gsnpsid >= DWC2_CORE_REV_2_72a && + gsnpsid <= DWC2_CORE_REV_4_00a) || + gsnpsid == DWC2_FS_IOT_REV_1_00a || + gsnpsid == DWC2_HS_IOT_REV_1_00a) + epctrl |= DXEPCTL_CNAK; + } + dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n", __func__, epctrl); diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index 190f95964000..c51b73b3e048 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -358,9 +358,14 @@ static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg) static int dwc2_vbus_supply_init(struct dwc2_hsotg *hsotg) { + int ret; + hsotg->vbus_supply = devm_regulator_get_optional(hsotg->dev, "vbus"); - if (IS_ERR(hsotg->vbus_supply)) - return 0; + if (IS_ERR(hsotg->vbus_supply)) { + ret = PTR_ERR(hsotg->vbus_supply); + hsotg->vbus_supply = NULL; + return ret == -ENODEV ? 0 : ret; + } return regulator_enable(hsotg->vbus_supply); } @@ -4342,9 +4347,7 @@ static int _dwc2_hcd_start(struct usb_hcd *hcd) spin_unlock_irqrestore(&hsotg->lock, flags); - dwc2_vbus_supply_init(hsotg); - - return 0; + return dwc2_vbus_supply_init(hsotg); } /* diff --git a/drivers/usb/dwc2/pci.c b/drivers/usb/dwc2/pci.c index 7f21747007f1..bea2e8ec0369 100644 --- a/drivers/usb/dwc2/pci.c +++ b/drivers/usb/dwc2/pci.c @@ -141,8 +141,10 @@ static int dwc2_pci_probe(struct pci_dev *pci, goto err; glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL); - if (!glue) + if (!glue) { + ret = -ENOMEM; goto err; + } ret = platform_device_add(dwc2); if (ret) { diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 8796a5ee9bb9..0dedf8a799f4 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -166,7 +166,7 @@ static void dwc3_ep_inc_deq(struct dwc3_ep *dep) dwc3_ep_inc_trb(&dep->trb_dequeue); } -void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep, +static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep, struct dwc3_request *req, int status) { struct dwc3 *dwc = dep->dwc; @@ -1424,7 +1424,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, dwc->lock); if (!r->trb) - goto out1; + goto out0; if (r->num_pending_sgs) { struct dwc3_trb *trb; diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c index 7889bcc0509a..8b72b192c747 100644 --- a/drivers/usb/gadget/function/f_phonet.c +++ b/drivers/usb/gadget/function/f_phonet.c @@ -221,7 +221,7 @@ static void pn_tx_complete(struct usb_ep *ep, struct usb_request *req) netif_wake_queue(dev); } -static int pn_net_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t pn_net_xmit(struct sk_buff *skb, struct net_device *dev) { struct phonet_port *port = netdev_priv(dev); struct f_phonet *fp; diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c index 4c6c08b675b5..21307d862af6 100644 --- a/drivers/usb/host/ehci-mem.c +++ b/drivers/usb/host/ehci-mem.c @@ -73,9 +73,10 @@ static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags) if (!qh) goto done; qh->hw = (struct ehci_qh_hw *) - dma_pool_zalloc(ehci->qh_pool, flags, &dma); + dma_pool_alloc(ehci->qh_pool, flags, &dma); if (!qh->hw) goto fail; + memset(qh->hw, 0, sizeof *qh->hw); qh->qh_dma = dma; // INIT_LIST_HEAD (&qh->qh_list); INIT_LIST_HEAD (&qh->qtd_list); diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index 28e2a338b481..e56db44708bc 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -1287,7 +1287,7 @@ itd_urb_transaction( } else { alloc_itd: 
spin_unlock_irqrestore(&ehci->lock, flags); - itd = dma_pool_zalloc(ehci->itd_pool, mem_flags, + itd = dma_pool_alloc(ehci->itd_pool, mem_flags, &itd_dma); spin_lock_irqsave(&ehci->lock, flags); if (!itd) { @@ -1297,6 +1297,7 @@ itd_urb_transaction( } } + memset(itd, 0, sizeof(*itd)); itd->itd_dma = itd_dma; itd->frame = NO_FRAME; list_add(&itd->itd_list, &sched->td_list); @@ -2080,7 +2081,7 @@ sitd_urb_transaction( } else { alloc_sitd: spin_unlock_irqrestore(&ehci->lock, flags); - sitd = dma_pool_zalloc(ehci->sitd_pool, mem_flags, + sitd = dma_pool_alloc(ehci->sitd_pool, mem_flags, &sitd_dma); spin_lock_irqsave(&ehci->lock, flags); if (!sitd) { @@ -2090,6 +2091,7 @@ sitd_urb_transaction( } } + memset(sitd, 0, sizeof(*sitd)); sitd->sitd_dma = sitd_dma; sitd->frame = NO_FRAME; list_add(&sitd->sitd_list, &iso_sched->td_list); diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c index 48779c44c361..eb494ec547e8 100644 --- a/drivers/usb/host/xhci-dbgtty.c +++ b/drivers/usb/host/xhci-dbgtty.c @@ -320,9 +320,11 @@ int xhci_dbc_tty_register_driver(struct xhci_hcd *xhci) void xhci_dbc_tty_unregister_driver(void) { - tty_unregister_driver(dbc_tty_driver); - put_tty_driver(dbc_tty_driver); - dbc_tty_driver = NULL; + if (dbc_tty_driver) { + tty_unregister_driver(dbc_tty_driver); + put_tty_driver(dbc_tty_driver); + dbc_tty_driver = NULL; + } } static void dbc_rx_push(unsigned long _port) diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index f17b7eab66cf..85ffda85f8ab 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -126,7 +126,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info()) xhci->quirks |= XHCI_AMD_PLL_FIX; - if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x43bb) + if (pdev->vendor == PCI_VENDOR_ID_AMD && + (pdev->device == 0x15e0 || + pdev->device == 0x15e1 || + pdev->device == 0x43bb)) xhci->quirks |= XHCI_SUSPEND_DELAY; if (pdev->vendor == PCI_VENDOR_ID_AMD) diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index df327dcc2bac..c1b22fc64e38 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -157,6 +157,7 @@ static int xhci_plat_probe(struct platform_device *pdev) struct resource *res; struct usb_hcd *hcd; struct clk *clk; + struct clk *reg_clk; int ret; int irq; @@ -226,17 +227,27 @@ static int xhci_plat_probe(struct platform_device *pdev) hcd->rsrc_len = resource_size(res); /* - * Not all platforms have a clk so it is not an error if the - * clock does not exists. + * Not all platforms have clks so it is not an error if the + * clock do not exist. 
*/ + reg_clk = devm_clk_get(&pdev->dev, "reg"); + if (!IS_ERR(reg_clk)) { + ret = clk_prepare_enable(reg_clk); + if (ret) + goto put_hcd; + } else if (PTR_ERR(reg_clk) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto put_hcd; + } + clk = devm_clk_get(&pdev->dev, NULL); if (!IS_ERR(clk)) { ret = clk_prepare_enable(clk); if (ret) - goto put_hcd; + goto disable_reg_clk; } else if (PTR_ERR(clk) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; - goto put_hcd; + goto disable_reg_clk; } xhci = hcd_to_xhci(hcd); @@ -252,6 +263,7 @@ static int xhci_plat_probe(struct platform_device *pdev) device_wakeup_enable(hcd->self.controller); xhci->clk = clk; + xhci->reg_clk = reg_clk; xhci->main_hcd = hcd; xhci->shared_hcd = __usb_create_hcd(driver, sysdev, &pdev->dev, dev_name(&pdev->dev), hcd); @@ -320,8 +332,10 @@ put_usb3_hcd: usb_put_hcd(xhci->shared_hcd); disable_clk: - if (!IS_ERR(clk)) - clk_disable_unprepare(clk); + clk_disable_unprepare(clk); + +disable_reg_clk: + clk_disable_unprepare(reg_clk); put_hcd: usb_put_hcd(hcd); @@ -338,6 +352,7 @@ static int xhci_plat_remove(struct platform_device *dev) struct usb_hcd *hcd = platform_get_drvdata(dev); struct xhci_hcd *xhci = hcd_to_xhci(hcd); struct clk *clk = xhci->clk; + struct clk *reg_clk = xhci->reg_clk; xhci->xhc_state |= XHCI_STATE_REMOVING; @@ -347,8 +362,8 @@ static int xhci_plat_remove(struct platform_device *dev) usb_remove_hcd(hcd); usb_put_hcd(xhci->shared_hcd); - if (!IS_ERR(clk)) - clk_disable_unprepare(clk); + clk_disable_unprepare(clk); + clk_disable_unprepare(reg_clk); usb_put_hcd(hcd); pm_runtime_set_suspended(&dev->dev); @@ -420,7 +435,6 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match); static struct platform_driver usb_xhci_driver = { .probe = xhci_plat_probe, .remove = xhci_plat_remove, - .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "xhci-hcd", .pm = &xhci_plat_pm_ops, diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 9b27798ecce5..711da3306b14 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -3621,6 +3621,7 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); } xhci_debugfs_remove_slot(xhci, udev->slot_id); + virt_dev->udev = NULL; ret = xhci_disable_slot(xhci, udev->slot_id); if (ret) xhci_free_virt_device(xhci, udev->slot_id); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 05c909b04f14..6dfc4867dbcf 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1729,8 +1729,9 @@ struct xhci_hcd { int page_shift; /* msi-x vectors */ int msix_count; - /* optional clock */ + /* optional clocks */ struct clk *clk; + struct clk *reg_clk; /* data structures */ struct xhci_device_context_array *dcbaa; struct xhci_ring *cmd_ring; diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index 05a679d5e3a2..6a60bc0490c5 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c @@ -451,7 +451,6 @@ static int dsps_musb_init(struct musb *musb) if (!rev) return -ENODEV; - usb_phy_init(musb->xceiv); if (IS_ERR(musb->phy)) { musb->phy = NULL; } else { @@ -501,7 +500,6 @@ static int dsps_musb_exit(struct musb *musb) struct dsps_glue *glue = dev_get_drvdata(dev->parent); del_timer_sync(&musb->dev_timer); - usb_phy_shutdown(musb->xceiv); phy_power_off(musb->phy); phy_exit(musb->phy); debugfs_remove_recursive(glue->dbgfs_root); diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index e564695c6c8d..71c5835ea9cd 100644 --- 
a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -417,7 +417,6 @@ void musb_g_tx(struct musb *musb, u8 epnum) req = next_request(musb_ep); request = &req->request; - trace_musb_req_tx(req); csr = musb_readw(epio, MUSB_TXCSR); musb_dbg(musb, "<== %s, txcsr %04x", musb_ep->end_point.name, csr); @@ -456,6 +455,8 @@ void musb_g_tx(struct musb *musb, u8 epnum) u8 is_dma = 0; bool short_packet = false; + trace_musb_req_tx(req); + if (dma && (csr & MUSB_TXCSR_DMAENAB)) { is_dma = 1; csr |= MUSB_TXCSR_P_WZC_BITS; diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 3a8451a15f7f..e7f99d55922a 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -990,7 +990,9 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep, /* set tx_reinit and schedule the next qh */ ep->tx_reinit = 1; } - musb_start_urb(musb, is_in, next_qh); + + if (next_qh) + musb_start_urb(musb, is_in, next_qh); } } @@ -2754,6 +2756,7 @@ int musb_host_setup(struct musb *musb, int power_budget) hcd->self.otg_port = 1; musb->xceiv->otg->host = &hcd->self; hcd->power_budget = 2 * (power_budget ? : 250); + hcd->skip_phy_initialization = 1; ret = usb_add_hcd(hcd, 0, 0); if (ret < 0) diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig index a646820f5a78..533f127c30ad 100644 --- a/drivers/usb/serial/Kconfig +++ b/drivers/usb/serial/Kconfig @@ -62,6 +62,7 @@ config USB_SERIAL_SIMPLE - Fundamental Software dongle. - Google USB serial devices - HP4x calculators + - Libtransistor USB console - a number of Motorola phones - Motorola Tetra devices - Novatel Wireless GPS receivers diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index de1e759dd512..eb6c26cbe579 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -214,6 +214,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */ { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */ { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */ + { USB_DEVICE(0x3923, 0x7A0B) }, /* National Instruments USB Serial Console */ { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */ { } /* Terminating Entry */ }; diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 87202ad5a50d..7ea221d42dba 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1898,7 +1898,8 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial) return ftdi_jtag_probe(serial); if (udev->product && - (!strcmp(udev->product, "BeagleBone/XDS100V2") || + (!strcmp(udev->product, "Arrow USB Blaster") || + !strcmp(udev->product, "BeagleBone/XDS100V2") || !strcmp(udev->product, "SNAP Connect E10"))) return ftdi_jtag_probe(serial); diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index c3f252283ab9..2058852a87fa 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -233,6 +233,8 @@ static void option_instat_callback(struct urb *urb); /* These Quectel products use Qualcomm's vendor ID */ #define QUECTEL_PRODUCT_UC20 0x9003 #define QUECTEL_PRODUCT_UC15 0x9090 +/* These u-blox products use Qualcomm's vendor ID */ +#define UBLOX_PRODUCT_R410M 0x90b2 /* These Yuga products use Qualcomm's vendor ID */ #define YUGA_PRODUCT_CLM920_NC5 0x9625 @@ -1065,6 +1067,9 @@ static const struct usb_device_id option_ids[] = { /* Yuga products use Qualcomm vendor ID */ { USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5), 
.driver_info = RSVD(1) | RSVD(4) }, + /* u-blox products using Qualcomm vendor ID */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M), + .driver_info = RSVD(1) | RSVD(3) }, /* Quectel products using Quectel vendor ID */ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21), .driver_info = RSVD(4) }, diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index 4ef79e29cb26..40864c2bd9dc 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c @@ -63,6 +63,11 @@ DEVICE(flashloader, FLASHLOADER_IDS); 0x01) } DEVICE(google, GOOGLE_IDS); +/* Libtransistor USB console */ +#define LIBTRANSISTOR_IDS() \ + { USB_DEVICE(0x1209, 0x8b00) } +DEVICE(libtransistor, LIBTRANSISTOR_IDS); + /* ViVOpay USB Serial Driver */ #define VIVOPAY_IDS() \ { USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */ @@ -110,6 +115,7 @@ static struct usb_serial_driver * const serial_drivers[] = { &funsoft_device, &flashloader_device, &google_device, + &libtransistor_device, &vivopay_device, &moto_modem_device, &motorola_tetra_device, @@ -126,6 +132,7 @@ static const struct usb_device_id id_table[] = { FUNSOFT_IDS(), FLASHLOADER_IDS(), GOOGLE_IDS(), + LIBTRANSISTOR_IDS(), VIVOPAY_IDS(), MOTO_IDS(), MOTOROLA_TETRA_IDS(), diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c index f5373ed2cd45..8ddbecc25d89 100644 --- a/drivers/usb/serial/visor.c +++ b/drivers/usb/serial/visor.c @@ -335,47 +335,48 @@ static int palm_os_3_probe(struct usb_serial *serial, goto exit; } - if (retval == sizeof(*connection_info)) { - connection_info = (struct visor_connection_info *) - transfer_buffer; - - num_ports = le16_to_cpu(connection_info->num_ports); - for (i = 0; i < num_ports; ++i) { - switch ( - connection_info->connections[i].port_function_id) { - case VISOR_FUNCTION_GENERIC: - string = "Generic"; - break; - case VISOR_FUNCTION_DEBUGGER: - string = "Debugger"; - break; - case VISOR_FUNCTION_HOTSYNC: - string = "HotSync"; - break; - case VISOR_FUNCTION_CONSOLE: - string = "Console"; - break; - case VISOR_FUNCTION_REMOTE_FILE_SYS: - string = "Remote File System"; - break; - default: - string = "unknown"; - break; - } - dev_info(dev, "%s: port %d, is for %s use\n", - serial->type->description, - connection_info->connections[i].port, string); - } + if (retval != sizeof(*connection_info)) { + dev_err(dev, "Invalid connection information received from device\n"); + retval = -ENODEV; + goto exit; } - /* - * Handle devices that report invalid stuff here. - */ + + connection_info = (struct visor_connection_info *)transfer_buffer; + + num_ports = le16_to_cpu(connection_info->num_ports); + + /* Handle devices that report invalid stuff here. 
*/ if (num_ports == 0 || num_ports > 2) { dev_warn(dev, "%s: No valid connect info available\n", serial->type->description); num_ports = 2; } + for (i = 0; i < num_ports; ++i) { + switch (connection_info->connections[i].port_function_id) { + case VISOR_FUNCTION_GENERIC: + string = "Generic"; + break; + case VISOR_FUNCTION_DEBUGGER: + string = "Debugger"; + break; + case VISOR_FUNCTION_HOTSYNC: + string = "HotSync"; + break; + case VISOR_FUNCTION_CONSOLE: + string = "Console"; + break; + case VISOR_FUNCTION_REMOTE_FILE_SYS: + string = "Remote File System"; + break; + default: + string = "unknown"; + break; + } + dev_info(dev, "%s: port %d, is for %s use\n", + serial->type->description, + connection_info->connections[i].port, string); + } dev_info(dev, "%s: Number of ports: %d\n", serial->type->description, num_ports); diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c index 677d12138dbd..ded49e3bf2b0 100644 --- a/drivers/usb/typec/tcpm.c +++ b/drivers/usb/typec/tcpm.c @@ -3725,6 +3725,7 @@ void tcpm_unregister_port(struct tcpm_port *port) for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++) typec_unregister_altmode(port->port_altmode[i]); typec_unregister_port(port->typec_port); + usb_role_switch_put(port->role_sw); tcpm_debugfs_exit(port); destroy_workqueue(port->wq); } diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c index 8b8406867c02..4b4c8d271b27 100644 --- a/drivers/usb/typec/tps6598x.c +++ b/drivers/usb/typec/tps6598x.c @@ -73,6 +73,7 @@ struct tps6598x { struct device *dev; struct regmap *regmap; struct mutex lock; /* device lock */ + u8 i2c_protocol:1; struct typec_port *port; struct typec_partner *partner; @@ -80,19 +81,39 @@ struct tps6598x { struct typec_capability typec_cap; }; +static int +tps6598x_block_read(struct tps6598x *tps, u8 reg, void *val, size_t len) +{ + u8 data[len + 1]; + int ret; + + if (!tps->i2c_protocol) + return regmap_raw_read(tps->regmap, reg, val, len); + + ret = regmap_raw_read(tps->regmap, reg, data, sizeof(data)); + if (ret) + return ret; + + if (data[0] < len) + return -EIO; + + memcpy(val, &data[1], len); + return 0; +} + static inline int tps6598x_read16(struct tps6598x *tps, u8 reg, u16 *val) { - return regmap_raw_read(tps->regmap, reg, val, sizeof(u16)); + return tps6598x_block_read(tps, reg, val, sizeof(u16)); } static inline int tps6598x_read32(struct tps6598x *tps, u8 reg, u32 *val) { - return regmap_raw_read(tps->regmap, reg, val, sizeof(u32)); + return tps6598x_block_read(tps, reg, val, sizeof(u32)); } static inline int tps6598x_read64(struct tps6598x *tps, u8 reg, u64 *val) { - return regmap_raw_read(tps->regmap, reg, val, sizeof(u64)); + return tps6598x_block_read(tps, reg, val, sizeof(u64)); } static inline int tps6598x_write16(struct tps6598x *tps, u8 reg, u16 val) @@ -121,8 +142,8 @@ static int tps6598x_read_partner_identity(struct tps6598x *tps) struct tps6598x_rx_identity_reg id; int ret; - ret = regmap_raw_read(tps->regmap, TPS_REG_RX_IDENTITY_SOP, - &id, sizeof(id)); + ret = tps6598x_block_read(tps, TPS_REG_RX_IDENTITY_SOP, + &id, sizeof(id)); if (ret) return ret; @@ -224,13 +245,13 @@ static int tps6598x_exec_cmd(struct tps6598x *tps, const char *cmd, } while (val); if (out_len) { - ret = regmap_raw_read(tps->regmap, TPS_REG_DATA1, - out_data, out_len); + ret = tps6598x_block_read(tps, TPS_REG_DATA1, + out_data, out_len); if (ret) return ret; val = out_data[0]; } else { - ret = regmap_read(tps->regmap, TPS_REG_DATA1, &val); + ret = tps6598x_block_read(tps, TPS_REG_DATA1, &val, sizeof(u8)); if 
(ret) return ret; } @@ -385,6 +406,16 @@ static int tps6598x_probe(struct i2c_client *client) if (!vid) return -ENODEV; + /* + * Checking can the adapter handle SMBus protocol. If it can not, the + * driver needs to take care of block reads separately. + * + * FIXME: Testing with I2C_FUNC_I2C. regmap-i2c uses I2C protocol + * unconditionally if the adapter has I2C_FUNC_I2C set. + */ + if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) + tps->i2c_protocol = true; + ret = tps6598x_read32(tps, TPS_REG_STATUS, &status); if (ret < 0) return ret; diff --git a/drivers/usb/typec/ucsi/Makefile b/drivers/usb/typec/ucsi/Makefile index b57891c1fd31..7afbea512207 100644 --- a/drivers/usb/typec/ucsi/Makefile +++ b/drivers/usb/typec/ucsi/Makefile @@ -5,6 +5,6 @@ obj-$(CONFIG_TYPEC_UCSI) += typec_ucsi.o typec_ucsi-y := ucsi.o -typec_ucsi-$(CONFIG_FTRACE) += trace.o +typec_ucsi-$(CONFIG_TRACING) += trace.o obj-$(CONFIG_UCSI_ACPI) += ucsi_acpi.o diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index bf0977fbd100..bd5cca5632b3 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -28,7 +28,7 @@ * difficult to estimate the time it takes for the system to process the command * before it is actually passed to the PPM. */ -#define UCSI_TIMEOUT_MS 1000 +#define UCSI_TIMEOUT_MS 5000 /* * UCSI_SWAP_TIMEOUT_MS - Timeout for role swap requests diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c index c31c8402a0c5..d41d0cdeec0f 100644 --- a/drivers/usb/usbip/stub_main.c +++ b/drivers/usb/usbip/stub_main.c @@ -186,7 +186,12 @@ static ssize_t rebind_store(struct device_driver *dev, const char *buf, if (!bid) return -ENODEV; + /* device_attach() callers should hold parent lock for USB */ + if (bid->udev->dev.parent) + device_lock(bid->udev->dev.parent); ret = device_attach(&bid->udev->dev); + if (bid->udev->dev.parent) + device_unlock(bid->udev->dev.parent); if (ret < 0) { dev_err(&bid->udev->dev, "rebind failed\n"); return ret; diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h index 473fb8a87289..bf8afe9b5883 100644 --- a/drivers/usb/usbip/usbip_common.h +++ b/drivers/usb/usbip/usbip_common.h @@ -243,7 +243,7 @@ enum usbip_side { #define VUDC_EVENT_ERROR_USB (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE) #define VUDC_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE) -#define VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_BYE) +#define VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_RESET | USBIP_EH_BYE) #define VDEV_EVENT_DOWN (USBIP_EH_SHUTDOWN | USBIP_EH_RESET) #define VDEV_EVENT_ERROR_TCP (USBIP_EH_SHUTDOWN | USBIP_EH_RESET) #define VDEV_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE) diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c index 5b4c0864ad92..5d88917c9631 100644 --- a/drivers/usb/usbip/usbip_event.c +++ b/drivers/usb/usbip/usbip_event.c @@ -91,10 +91,6 @@ static void event_handler(struct work_struct *work) unset_event(ud, USBIP_EH_UNUSABLE); } - /* Stop the error handler. 
*/ - if (ud->event & USBIP_EH_BYE) - usbip_dbg_eh("removed %p\n", ud); - wake_up(&ud->eh_waitq); } } diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index 20e3d4609583..d11f3f8dad40 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -354,6 +354,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, usbip_dbg_vhci_rh(" ClearHubFeature\n"); break; case ClearPortFeature: + if (rhport < 0) + goto error; switch (wValue) { case USB_PORT_FEAT_SUSPEND: if (hcd->speed == HCD_USB3) { @@ -511,11 +513,16 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, goto error; } + if (rhport < 0) + goto error; + vhci_hcd->port_status[rhport] |= USB_PORT_STAT_SUSPEND; break; case USB_PORT_FEAT_POWER: usbip_dbg_vhci_rh( " SetPortFeature: USB_PORT_FEAT_POWER\n"); + if (rhport < 0) + goto error; if (hcd->speed == HCD_USB3) vhci_hcd->port_status[rhport] |= USB_SS_PORT_STAT_POWER; else @@ -524,6 +531,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, case USB_PORT_FEAT_BH_PORT_RESET: usbip_dbg_vhci_rh( " SetPortFeature: USB_PORT_FEAT_BH_PORT_RESET\n"); + if (rhport < 0) + goto error; /* Applicable only for USB3.0 hub */ if (hcd->speed != HCD_USB3) { pr_err("USB_PORT_FEAT_BH_PORT_RESET req not " @@ -534,6 +543,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, case USB_PORT_FEAT_RESET: usbip_dbg_vhci_rh( " SetPortFeature: USB_PORT_FEAT_RESET\n"); + if (rhport < 0) + goto error; /* if it's already enabled, disable */ if (hcd->speed == HCD_USB3) { vhci_hcd->port_status[rhport] = 0; @@ -554,6 +565,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, default: usbip_dbg_vhci_rh(" SetPortFeature: default %d\n", wValue); + if (rhport < 0) + goto error; if (hcd->speed == HCD_USB3) { if ((vhci_hcd->port_status[rhport] & USB_SS_PORT_STAT_POWER) != 0) { diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c index 190dbf8cfcb5..2f3856a95856 100644 --- a/drivers/virt/vboxguest/vboxguest_core.c +++ b/drivers/virt/vboxguest/vboxguest_core.c @@ -114,7 +114,7 @@ static void vbg_guest_mappings_init(struct vbg_dev *gdev) } out: - kfree(req); + vbg_req_free(req, sizeof(*req)); kfree(pages); } @@ -144,7 +144,7 @@ static void vbg_guest_mappings_exit(struct vbg_dev *gdev) rc = vbg_req_perform(gdev, req); - kfree(req); + vbg_req_free(req, sizeof(*req)); if (rc < 0) { vbg_err("%s error: %d\n", __func__, rc); @@ -214,8 +214,8 @@ static int vbg_report_guest_info(struct vbg_dev *gdev) ret = vbg_status_code_to_errno(rc); out_free: - kfree(req2); - kfree(req1); + vbg_req_free(req2, sizeof(*req2)); + vbg_req_free(req1, sizeof(*req1)); return ret; } @@ -245,7 +245,7 @@ static int vbg_report_driver_status(struct vbg_dev *gdev, bool active) if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. 
*/ rc = VINF_SUCCESS; - kfree(req); + vbg_req_free(req, sizeof(*req)); return vbg_status_code_to_errno(rc); } @@ -431,7 +431,7 @@ static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled) rc = vbg_req_perform(gdev, req); do_div(req->interval_ns, 1000000); /* ns -> ms */ gdev->heartbeat_interval_ms = req->interval_ns; - kfree(req); + vbg_req_free(req, sizeof(*req)); return vbg_status_code_to_errno(rc); } @@ -454,12 +454,6 @@ static int vbg_heartbeat_init(struct vbg_dev *gdev) if (ret < 0) return ret; - /* - * Preallocate the request to use it from the timer callback because: - * 1) on Windows vbg_req_alloc must be called at IRQL <= APC_LEVEL - * and the timer callback runs at DISPATCH_LEVEL; - * 2) avoid repeated allocations. - */ gdev->guest_heartbeat_req = vbg_req_alloc( sizeof(*gdev->guest_heartbeat_req), VMMDEVREQ_GUEST_HEARTBEAT); @@ -481,8 +475,8 @@ static void vbg_heartbeat_exit(struct vbg_dev *gdev) { del_timer_sync(&gdev->heartbeat_timer); vbg_heartbeat_host_config(gdev, false); - kfree(gdev->guest_heartbeat_req); - + vbg_req_free(gdev->guest_heartbeat_req, + sizeof(*gdev->guest_heartbeat_req)); } /** @@ -543,7 +537,7 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev, if (rc < 0) vbg_err("%s error, rc: %d\n", __func__, rc); - kfree(req); + vbg_req_free(req, sizeof(*req)); return vbg_status_code_to_errno(rc); } @@ -617,7 +611,7 @@ static int vbg_set_session_event_filter(struct vbg_dev *gdev, out: mutex_unlock(&gdev->session_mutex); - kfree(req); + vbg_req_free(req, sizeof(*req)); return ret; } @@ -642,7 +636,7 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev) if (rc < 0) vbg_err("%s error, rc: %d\n", __func__, rc); - kfree(req); + vbg_req_free(req, sizeof(*req)); return vbg_status_code_to_errno(rc); } @@ -712,7 +706,7 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev, out: mutex_unlock(&gdev->session_mutex); - kfree(req); + vbg_req_free(req, sizeof(*req)); return ret; } @@ -733,8 +727,10 @@ static int vbg_query_host_version(struct vbg_dev *gdev) rc = vbg_req_perform(gdev, req); ret = vbg_status_code_to_errno(rc); - if (ret) + if (ret) { + vbg_err("%s error: %d\n", __func__, rc); goto out; + } snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u", req->major, req->minor, req->build, req->revision); @@ -749,7 +745,7 @@ static int vbg_query_host_version(struct vbg_dev *gdev) } out: - kfree(req); + vbg_req_free(req, sizeof(*req)); return ret; } @@ -847,11 +843,16 @@ int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events) return 0; err_free_reqs: - kfree(gdev->mouse_status_req); - kfree(gdev->ack_events_req); - kfree(gdev->cancel_req); - kfree(gdev->mem_balloon.change_req); - kfree(gdev->mem_balloon.get_req); + vbg_req_free(gdev->mouse_status_req, + sizeof(*gdev->mouse_status_req)); + vbg_req_free(gdev->ack_events_req, + sizeof(*gdev->ack_events_req)); + vbg_req_free(gdev->cancel_req, + sizeof(*gdev->cancel_req)); + vbg_req_free(gdev->mem_balloon.change_req, + sizeof(*gdev->mem_balloon.change_req)); + vbg_req_free(gdev->mem_balloon.get_req, + sizeof(*gdev->mem_balloon.get_req)); return ret; } @@ -872,11 +873,16 @@ void vbg_core_exit(struct vbg_dev *gdev) vbg_reset_host_capabilities(gdev); vbg_core_set_mouse_status(gdev, 0); - kfree(gdev->mouse_status_req); - kfree(gdev->ack_events_req); - kfree(gdev->cancel_req); - kfree(gdev->mem_balloon.change_req); - kfree(gdev->mem_balloon.get_req); + vbg_req_free(gdev->mouse_status_req, + sizeof(*gdev->mouse_status_req)); + vbg_req_free(gdev->ack_events_req, + 
sizeof(*gdev->ack_events_req)); + vbg_req_free(gdev->cancel_req, + sizeof(*gdev->cancel_req)); + vbg_req_free(gdev->mem_balloon.change_req, + sizeof(*gdev->mem_balloon.change_req)); + vbg_req_free(gdev->mem_balloon.get_req, + sizeof(*gdev->mem_balloon.get_req)); } /** @@ -1415,7 +1421,7 @@ static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev, req->flags = dump->u.in.flags; dump->hdr.rc = vbg_req_perform(gdev, req); - kfree(req); + vbg_req_free(req, sizeof(*req)); return 0; } @@ -1513,7 +1519,7 @@ int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features) if (rc < 0) vbg_err("%s error, rc: %d\n", __func__, rc); - kfree(req); + vbg_req_free(req, sizeof(*req)); return vbg_status_code_to_errno(rc); } diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h index 6c784bf4fa6d..7ad9ec45bfa9 100644 --- a/drivers/virt/vboxguest/vboxguest_core.h +++ b/drivers/virt/vboxguest/vboxguest_core.h @@ -171,4 +171,13 @@ irqreturn_t vbg_core_isr(int irq, void *dev_id); void vbg_linux_mouse_event(struct vbg_dev *gdev); +/* Private (non exported) functions form vboxguest_utils.c */ +void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type); +void vbg_req_free(void *req, size_t len); +int vbg_req_perform(struct vbg_dev *gdev, void *req); +int vbg_hgcm_call32( + struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms, + struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count, + int *vbox_status); + #endif diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c index 82e280d38cc2..398d22693234 100644 --- a/drivers/virt/vboxguest/vboxguest_linux.c +++ b/drivers/virt/vboxguest/vboxguest_linux.c @@ -87,6 +87,7 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req, struct vbg_session *session = filp->private_data; size_t returned_size, size; struct vbg_ioctl_hdr hdr; + bool is_vmmdev_req; int ret = 0; void *buf; @@ -106,8 +107,17 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req, if (size > SZ_16M) return -E2BIG; - /* __GFP_DMA32 because IOCTL_VMMDEV_REQUEST passes this to the host */ - buf = kmalloc(size, GFP_KERNEL | __GFP_DMA32); + /* + * IOCTL_VMMDEV_REQUEST needs the buffer to be below 4G to avoid + * the need for a bounce-buffer and another copy later on. 
+ */ + is_vmmdev_req = (req & ~IOCSIZE_MASK) == VBG_IOCTL_VMMDEV_REQUEST(0) || + req == VBG_IOCTL_VMMDEV_REQUEST_BIG; + + if (is_vmmdev_req) + buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT); + else + buf = kmalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -132,7 +142,10 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req, ret = -EFAULT; out: - kfree(buf); + if (is_vmmdev_req) + vbg_req_free(buf, size); + else + kfree(buf); return ret; } diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c index 0f0dab8023cf..bf4474214b4d 100644 --- a/drivers/virt/vboxguest/vboxguest_utils.c +++ b/drivers/virt/vboxguest/vboxguest_utils.c @@ -65,8 +65,9 @@ VBG_LOG(vbg_debug, pr_debug); void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type) { struct vmmdev_request_header *req; + int order = get_order(PAGE_ALIGN(len)); - req = kmalloc(len, GFP_KERNEL | __GFP_DMA32); + req = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order); if (!req) return NULL; @@ -82,6 +83,14 @@ void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type) return req; } +void vbg_req_free(void *req, size_t len) +{ + if (!req) + return; + + free_pages((unsigned long)req, get_order(PAGE_ALIGN(len))); +} + /* Note this function returns a VBox status code, not a negative errno!! */ int vbg_req_perform(struct vbg_dev *gdev, void *req) { @@ -137,7 +146,7 @@ int vbg_hgcm_connect(struct vbg_dev *gdev, rc = hgcm_connect->header.result; } - kfree(hgcm_connect); + vbg_req_free(hgcm_connect, sizeof(*hgcm_connect)); *vbox_status = rc; return 0; @@ -166,7 +175,7 @@ int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status) if (rc >= 0) rc = hgcm_disconnect->header.result; - kfree(hgcm_disconnect); + vbg_req_free(hgcm_disconnect, sizeof(*hgcm_disconnect)); *vbox_status = rc; return 0; @@ -623,7 +632,7 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function, } if (!leak_it) - kfree(call); + vbg_req_free(call, size); free_bounce_bufs: if (bounce_bufs) { diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c index a5b8eb21201f..1abe4d021fd2 100644 --- a/drivers/watchdog/aspeed_wdt.c +++ b/drivers/watchdog/aspeed_wdt.c @@ -55,6 +55,8 @@ MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table); #define WDT_CTRL_WDT_INTR BIT(2) #define WDT_CTRL_RESET_SYSTEM BIT(1) #define WDT_CTRL_ENABLE BIT(0) +#define WDT_TIMEOUT_STATUS 0x10 +#define WDT_TIMEOUT_STATUS_BOOT_SECONDARY BIT(1) /* * WDT_RESET_WIDTH controls the characteristics of the external pulse (if @@ -192,6 +194,7 @@ static int aspeed_wdt_probe(struct platform_device *pdev) struct device_node *np; const char *reset_type; u32 duration; + u32 status; int ret; wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); @@ -307,6 +310,10 @@ static int aspeed_wdt_probe(struct platform_device *pdev) writel(duration - 1, wdt->base + WDT_RESET_WIDTH); } + status = readl(wdt->base + WDT_TIMEOUT_STATUS); + if (status & WDT_TIMEOUT_STATUS_BOOT_SECONDARY) + wdt->wdd.bootstatus = WDIOF_CARDRESET; + ret = devm_watchdog_register_device(&pdev->dev, &wdt->wdd); if (ret) { dev_err(&pdev->dev, "failed to register\n"); diff --git a/drivers/watchdog/renesas_wdt.c b/drivers/watchdog/renesas_wdt.c index 6b8c6ddfe30b..514db5cc1595 100644 --- a/drivers/watchdog/renesas_wdt.c +++ b/drivers/watchdog/renesas_wdt.c @@ -121,7 +121,8 @@ static int rwdt_restart(struct watchdog_device *wdev, unsigned long action, } static const struct watchdog_info rwdt_ident = { - .options = WDIOF_MAGICCLOSE | 
WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT, + .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | + WDIOF_CARDRESET, .identity = "Renesas WDT Watchdog", }; @@ -197,9 +198,10 @@ static int rwdt_probe(struct platform_device *pdev) return PTR_ERR(clk); pm_runtime_enable(&pdev->dev); - pm_runtime_get_sync(&pdev->dev); priv->clk_rate = clk_get_rate(clk); + priv->wdev.bootstatus = (readb_relaxed(priv->base + RWTCSRA) & + RWTCSRA_WOVF) ? WDIOF_CARDRESET : 0; pm_runtime_put(&pdev->dev); if (!priv->clk_rate) { diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c index 43d0cbb7ba0b..814cdf539b0f 100644 --- a/drivers/watchdog/sch311x_wdt.c +++ b/drivers/watchdog/sch311x_wdt.c @@ -299,7 +299,7 @@ static long sch311x_wdt_ioctl(struct file *file, unsigned int cmd, if (sch311x_wdt_set_heartbeat(new_timeout)) return -EINVAL; sch311x_wdt_keepalive(); - /* Fall */ + /* Fall through */ case WDIOC_GETTIMEOUT: return put_user(timeout, p); default: diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c index 20e2bba10400..672b61a7f9a3 100644 --- a/drivers/watchdog/w83977f_wdt.c +++ b/drivers/watchdog/w83977f_wdt.c @@ -427,7 +427,7 @@ static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return -EINVAL; wdt_keepalive(); - /* Fall */ + /* Fall through */ case WDIOC_GETTIMEOUT: return put_user(timeout, uarg.i); diff --git a/drivers/watchdog/wafer5823wdt.c b/drivers/watchdog/wafer5823wdt.c index db0da7ea4fd8..93c5b610e264 100644 --- a/drivers/watchdog/wafer5823wdt.c +++ b/drivers/watchdog/wafer5823wdt.c @@ -178,7 +178,7 @@ static long wafwdt_ioctl(struct file *file, unsigned int cmd, timeout = new_timeout; wafwdt_stop(); wafwdt_start(); - /* Fall */ + /* Fall through */ case WDIOC_GETTIMEOUT: return put_user(timeout, p); diff --git a/drivers/xen/xen-pciback/conf_space_quirks.c b/drivers/xen/xen-pciback/conf_space_quirks.c index 89d9744ece61..ed593d1042a6 100644 --- a/drivers/xen/xen-pciback/conf_space_quirks.c +++ b/drivers/xen/xen-pciback/conf_space_quirks.c @@ -95,7 +95,7 @@ int xen_pcibk_config_quirks_init(struct pci_dev *dev) struct xen_pcibk_config_quirk *quirk; int ret = 0; - quirk = kzalloc(sizeof(*quirk), GFP_ATOMIC); + quirk = kzalloc(sizeof(*quirk), GFP_KERNEL); if (!quirk) { ret = -ENOMEM; goto out; diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c index 9e480fdebe1f..59661db144e5 100644 --- a/drivers/xen/xen-pciback/pci_stub.c +++ b/drivers/xen/xen-pciback/pci_stub.c @@ -71,7 +71,7 @@ static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev) dev_dbg(&dev->dev, "pcistub_device_alloc\n"); - psdev = kzalloc(sizeof(*psdev), GFP_ATOMIC); + psdev = kzalloc(sizeof(*psdev), GFP_KERNEL); if (!psdev) return NULL; @@ -364,7 +364,7 @@ static int pcistub_init_device(struct pci_dev *dev) * here and then to call kfree(pci_get_drvdata(psdev->dev)). 
*/ dev_data = kzalloc(sizeof(*dev_data) + strlen(DRV_NAME "[]") - + strlen(pci_name(dev)) + 1, GFP_ATOMIC); + + strlen(pci_name(dev)) + 1, GFP_KERNEL); if (!dev_data) { err = -ENOMEM; goto out; @@ -577,7 +577,7 @@ static int pcistub_probe(struct pci_dev *dev, const struct pci_device_id *id) } if (!match) { - pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_ATOMIC); + pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL); if (!pci_dev_id) { err = -ENOMEM; goto out; @@ -1149,7 +1149,7 @@ static int pcistub_reg_add(int domain, int bus, int slot, int func, } dev = psdev->dev; - field = kzalloc(sizeof(*field), GFP_ATOMIC); + field = kzalloc(sizeof(*field), GFP_KERNEL); if (!field) { err = -ENOMEM; goto out; diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index 0d6d9264d6a9..c3e201025ef0 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c @@ -403,7 +403,7 @@ static int xenbus_command_reply(struct xenbus_file_priv *u, { struct { struct xsd_sockmsg hdr; - const char body[16]; + char body[16]; } msg; int rc; @@ -412,6 +412,7 @@ static int xenbus_command_reply(struct xenbus_file_priv *u, msg.hdr.len = strlen(reply) + 1; if (msg.hdr.len > sizeof(msg.body)) return -E2BIG; + memcpy(&msg.body, reply, msg.hdr.len); mutex_lock(&u->reply_mutex); rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len); diff --git a/fs/afs/server.c b/fs/afs/server.c index e23be63998a8..629c74986cff 100644 --- a/fs/afs/server.c +++ b/fs/afs/server.c @@ -428,8 +428,15 @@ static void afs_gc_servers(struct afs_net *net, struct afs_server *gc_list) } write_sequnlock(&net->fs_lock); - if (deleted) + if (deleted) { + write_seqlock(&net->fs_addr_lock); + if (!hlist_unhashed(&server->addr4_link)) + hlist_del_rcu(&server->addr4_link); + if (!hlist_unhashed(&server->addr6_link)) + hlist_del_rcu(&server->addr6_link); + write_sequnlock(&net->fs_addr_lock); afs_destroy_server(net, server); + } } } diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 82e8f6edfb48..b12e37f27530 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -749,7 +749,7 @@ static int autofs4_dir_mkdir(struct inode *dir, autofs4_del_active(dentry); - inode = autofs4_get_inode(dir->i_sb, S_IFDIR | 0555); + inode = autofs4_get_inode(dir->i_sb, S_IFDIR | mode); if (!inode) return -ENOMEM; d_add(dentry, inode); diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 41e04183e4ce..4ad6f669fe34 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -377,10 +377,10 @@ static unsigned long elf_map(struct file *filep, unsigned long addr, } else map_addr = vm_mmap(filep, addr, size, prot, type, off); - if ((type & MAP_FIXED_NOREPLACE) && BAD_ADDR(map_addr)) - pr_info("%d (%s): Uhuuh, elf segment at %p requested but the memory is mapped already\n", - task_pid_nr(current), current->comm, - (void *)addr); + if ((type & MAP_FIXED_NOREPLACE) && + PTR_ERR((void *)map_addr) == -EEXIST) + pr_info("%d (%s): Uhuuh, elf segment at %px requested but the memory is mapped already\n", + task_pid_nr(current), current->comm, (void *)addr); return(map_addr); } diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 5474ef14d6e6..2771cc56a622 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -459,6 +459,25 @@ struct btrfs_block_rsv { unsigned short full; unsigned short type; unsigned short failfast; + + /* + * Qgroup equivalent for @size @reserved + * + * Unlike normal @size/@reserved for inode rsv, qgroup doesn't care + * about things like csum size nor how many tree blocks it 
will need to + * reserve. + * + * Qgroup cares more about net change of the extent usage. + * + * So for one newly inserted file extent, in worst case it will cause + * leaf split and level increase, nodesize for each file extent is + * already too much. + * + * In short, qgroup_size/reserved is the upper limit of possible needed + * qgroup metadata reservation. + */ + u64 qgroup_rsv_size; + u64 qgroup_rsv_reserved; }; /* @@ -714,6 +733,12 @@ struct btrfs_delayed_root; */ #define BTRFS_FS_EXCL_OP 16 +/* + * To info transaction_kthread we need an immediate commit so it doesn't + * need to wait for commit_interval + */ +#define BTRFS_FS_NEED_ASYNC_COMMIT 17 + struct btrfs_fs_info { u8 fsid[BTRFS_FSID_SIZE]; u8 chunk_tree_uuid[BTRFS_UUID_SIZE]; diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 06ec8ab6d9ba..a8d492dbd3e7 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -556,6 +556,12 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans, dst_rsv = &fs_info->delayed_block_rsv; num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1); + + /* + * Here we migrate space rsv from transaction rsv, since have already + * reserved space when starting a transaction. So no need to reserve + * qgroup space here. + */ ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1); if (!ret) { trace_btrfs_space_reservation(fs_info, "delayed_item", @@ -577,7 +583,10 @@ static void btrfs_delayed_item_release_metadata(struct btrfs_root *root, return; rsv = &fs_info->delayed_block_rsv; - btrfs_qgroup_convert_reserved_meta(root, item->bytes_reserved); + /* + * Check btrfs_delayed_item_reserve_metadata() to see why we don't need + * to release/reserve qgroup space. + */ trace_btrfs_space_reservation(fs_info, "delayed_item", item->key.objectid, item->bytes_reserved, 0); @@ -602,9 +611,6 @@ static int btrfs_delayed_inode_reserve_metadata( num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1); - ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true); - if (ret < 0) - return ret; /* * btrfs_dirty_inode will update the inode under btrfs_join_transaction * which doesn't reserve space for speed. 
This is a problem since we @@ -616,6 +622,10 @@ static int btrfs_delayed_inode_reserve_metadata( */ if (!src_rsv || (!trans->bytes_reserved && src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) { + ret = btrfs_qgroup_reserve_meta_prealloc(root, + fs_info->nodesize, true); + if (ret < 0) + return ret; ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes, BTRFS_RESERVE_NO_FLUSH); /* @@ -634,6 +644,8 @@ static int btrfs_delayed_inode_reserve_metadata( "delayed_inode", btrfs_ino(inode), num_bytes, 1); + } else { + btrfs_qgroup_free_meta_prealloc(root, fs_info->nodesize); } return ret; } diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c index 9e98295de7ce..e1b0651686f7 100644 --- a/fs/btrfs/delayed-ref.c +++ b/fs/btrfs/delayed-ref.c @@ -540,8 +540,10 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info, struct btrfs_delayed_ref_head *head_ref, struct btrfs_qgroup_extent_record *qrecord, u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved, - int action, int is_data, int *qrecord_inserted_ret, + int action, int is_data, int is_system, + int *qrecord_inserted_ret, int *old_ref_mod, int *new_ref_mod) + { struct btrfs_delayed_ref_head *existing; struct btrfs_delayed_ref_root *delayed_refs; @@ -585,6 +587,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info, head_ref->ref_mod = count_mod; head_ref->must_insert_reserved = must_insert_reserved; head_ref->is_data = is_data; + head_ref->is_system = is_system; head_ref->ref_tree = RB_ROOT; INIT_LIST_HEAD(&head_ref->ref_add_list); RB_CLEAR_NODE(&head_ref->href_node); @@ -772,6 +775,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info, struct btrfs_delayed_ref_root *delayed_refs; struct btrfs_qgroup_extent_record *record = NULL; int qrecord_inserted; + int is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID); BUG_ON(extent_op && extent_op->is_data); ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS); @@ -800,8 +804,8 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info, */ head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record, bytenr, num_bytes, 0, 0, action, 0, - &qrecord_inserted, old_ref_mod, - new_ref_mod); + is_system, &qrecord_inserted, + old_ref_mod, new_ref_mod); add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr, num_bytes, parent, ref_root, level, action); @@ -868,7 +872,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info, */ head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record, bytenr, num_bytes, ref_root, reserved, - action, 1, &qrecord_inserted, + action, 1, 0, &qrecord_inserted, old_ref_mod, new_ref_mod); add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr, @@ -898,9 +902,14 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info, delayed_refs = &trans->transaction->delayed_refs; spin_lock(&delayed_refs->lock); + /* + * extent_ops just modify the flags of an extent and they don't result + * in ref count changes, hence it's safe to pass false/0 for is_system + * argument + */ add_delayed_ref_head(fs_info, trans, head_ref, NULL, bytenr, num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD, - extent_op->is_data, NULL, NULL, NULL); + extent_op->is_data, 0, NULL, NULL, NULL); spin_unlock(&delayed_refs->lock); return 0; diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h index 741869dbc316..7f00db50bd24 100644 --- a/fs/btrfs/delayed-ref.h +++ b/fs/btrfs/delayed-ref.h @@ -127,6 +127,7 @@ struct btrfs_delayed_ref_head { */ unsigned int must_insert_reserved:1; unsigned int is_data:1; + unsigned int is_system:1; unsigned int 
processing:1; }; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 4ac8b1d21baf..60caa68c3618 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1824,6 +1824,7 @@ static int transaction_kthread(void *arg) now = get_seconds(); if (cur->state < TRANS_STATE_BLOCKED && + !test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) && (now < cur->start_time || now - cur->start_time < fs_info->commit_interval)) { spin_unlock(&fs_info->trans_lock); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 75cfb80d2551..51b5e2da708c 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2601,13 +2601,19 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans, trace_run_delayed_ref_head(fs_info, head, 0); if (head->total_ref_mod < 0) { - struct btrfs_block_group_cache *cache; + struct btrfs_space_info *space_info; + u64 flags; - cache = btrfs_lookup_block_group(fs_info, head->bytenr); - ASSERT(cache); - percpu_counter_add(&cache->space_info->total_bytes_pinned, + if (head->is_data) + flags = BTRFS_BLOCK_GROUP_DATA; + else if (head->is_system) + flags = BTRFS_BLOCK_GROUP_SYSTEM; + else + flags = BTRFS_BLOCK_GROUP_METADATA; + space_info = __find_space_info(fs_info, flags); + ASSERT(space_info); + percpu_counter_add(&space_info->total_bytes_pinned, -head->num_bytes); - btrfs_put_block_group(cache); if (head->is_data) { spin_lock(&delayed_refs->lock); @@ -3136,7 +3142,11 @@ static noinline int check_delayed_ref(struct btrfs_root *root, struct rb_node *node; int ret = 0; + spin_lock(&root->fs_info->trans_lock); cur_trans = root->fs_info->running_transaction; + if (cur_trans) + refcount_inc(&cur_trans->use_count); + spin_unlock(&root->fs_info->trans_lock); if (!cur_trans) return 0; @@ -3145,6 +3155,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root, head = btrfs_find_delayed_ref_head(delayed_refs, bytenr); if (!head) { spin_unlock(&delayed_refs->lock); + btrfs_put_transaction(cur_trans); return 0; } @@ -3161,6 +3172,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root, mutex_lock(&head->mutex); mutex_unlock(&head->mutex); btrfs_put_delayed_ref_head(head); + btrfs_put_transaction(cur_trans); return -EAGAIN; } spin_unlock(&delayed_refs->lock); @@ -3193,6 +3205,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root, } spin_unlock(&head->lock); mutex_unlock(&head->mutex); + btrfs_put_transaction(cur_trans); return ret; } @@ -5559,14 +5572,18 @@ again: static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info, struct btrfs_block_rsv *block_rsv, - struct btrfs_block_rsv *dest, u64 num_bytes) + struct btrfs_block_rsv *dest, u64 num_bytes, + u64 *qgroup_to_release_ret) { struct btrfs_space_info *space_info = block_rsv->space_info; + u64 qgroup_to_release = 0; u64 ret; spin_lock(&block_rsv->lock); - if (num_bytes == (u64)-1) + if (num_bytes == (u64)-1) { num_bytes = block_rsv->size; + qgroup_to_release = block_rsv->qgroup_rsv_size; + } block_rsv->size -= num_bytes; if (block_rsv->reserved >= block_rsv->size) { num_bytes = block_rsv->reserved - block_rsv->size; @@ -5575,6 +5592,13 @@ static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info, } else { num_bytes = 0; } + if (block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) { + qgroup_to_release = block_rsv->qgroup_rsv_reserved - + block_rsv->qgroup_rsv_size; + block_rsv->qgroup_rsv_reserved = block_rsv->qgroup_rsv_size; + } else { + qgroup_to_release = 0; + } spin_unlock(&block_rsv->lock); ret = num_bytes; @@ -5597,6 +5621,8 @@ static u64 
block_rsv_release_bytes(struct btrfs_fs_info *fs_info, space_info_add_old_bytes(fs_info, space_info, num_bytes); } + if (qgroup_to_release_ret) + *qgroup_to_release_ret = qgroup_to_release; return ret; } @@ -5738,17 +5764,21 @@ static int btrfs_inode_rsv_refill(struct btrfs_inode *inode, struct btrfs_root *root = inode->root; struct btrfs_block_rsv *block_rsv = &inode->block_rsv; u64 num_bytes = 0; + u64 qgroup_num_bytes = 0; int ret = -ENOSPC; spin_lock(&block_rsv->lock); if (block_rsv->reserved < block_rsv->size) num_bytes = block_rsv->size - block_rsv->reserved; + if (block_rsv->qgroup_rsv_reserved < block_rsv->qgroup_rsv_size) + qgroup_num_bytes = block_rsv->qgroup_rsv_size - + block_rsv->qgroup_rsv_reserved; spin_unlock(&block_rsv->lock); if (num_bytes == 0) return 0; - ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true); + ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_num_bytes, true); if (ret) return ret; ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush); @@ -5756,7 +5786,13 @@ static int btrfs_inode_rsv_refill(struct btrfs_inode *inode, block_rsv_add_bytes(block_rsv, num_bytes, 0); trace_btrfs_space_reservation(root->fs_info, "delalloc", btrfs_ino(inode), num_bytes, 1); - } + + /* Don't forget to increase qgroup_rsv_reserved */ + spin_lock(&block_rsv->lock); + block_rsv->qgroup_rsv_reserved += qgroup_num_bytes; + spin_unlock(&block_rsv->lock); + } else + btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes); return ret; } @@ -5777,20 +5813,23 @@ static void btrfs_inode_rsv_release(struct btrfs_inode *inode, bool qgroup_free) struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; struct btrfs_block_rsv *block_rsv = &inode->block_rsv; u64 released = 0; + u64 qgroup_to_release = 0; /* * Since we statically set the block_rsv->size we just want to say we * are releasing 0 bytes, and then we'll just get the reservation over * the size free'd. 
*/ - released = block_rsv_release_bytes(fs_info, block_rsv, global_rsv, 0); + released = block_rsv_release_bytes(fs_info, block_rsv, global_rsv, 0, + &qgroup_to_release); if (released > 0) trace_btrfs_space_reservation(fs_info, "delalloc", btrfs_ino(inode), released, 0); if (qgroup_free) - btrfs_qgroup_free_meta_prealloc(inode->root, released); + btrfs_qgroup_free_meta_prealloc(inode->root, qgroup_to_release); else - btrfs_qgroup_convert_reserved_meta(inode->root, released); + btrfs_qgroup_convert_reserved_meta(inode->root, + qgroup_to_release); } void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info, @@ -5802,7 +5841,7 @@ void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info, if (global_rsv == block_rsv || block_rsv->space_info != global_rsv->space_info) global_rsv = NULL; - block_rsv_release_bytes(fs_info, block_rsv, global_rsv, num_bytes); + block_rsv_release_bytes(fs_info, block_rsv, global_rsv, num_bytes, NULL); } static void update_global_block_rsv(struct btrfs_fs_info *fs_info) @@ -5882,7 +5921,7 @@ static void init_global_block_rsv(struct btrfs_fs_info *fs_info) static void release_global_block_rsv(struct btrfs_fs_info *fs_info) { block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL, - (u64)-1); + (u64)-1, NULL); WARN_ON(fs_info->trans_block_rsv.size > 0); WARN_ON(fs_info->trans_block_rsv.reserved > 0); WARN_ON(fs_info->chunk_block_rsv.size > 0); @@ -5906,7 +5945,7 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans) WARN_ON_ONCE(!list_empty(&trans->new_bgs)); block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL, - trans->chunk_bytes_reserved); + trans->chunk_bytes_reserved, NULL); trans->chunk_bytes_reserved = 0; } @@ -6011,6 +6050,7 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info, { struct btrfs_block_rsv *block_rsv = &inode->block_rsv; u64 reserve_size = 0; + u64 qgroup_rsv_size = 0; u64 csum_leaves; unsigned outstanding_extents; @@ -6023,9 +6063,17 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info, inode->csum_bytes); reserve_size += btrfs_calc_trans_metadata_size(fs_info, csum_leaves); + /* + * For qgroup rsv, the calculation is very simple: + * account one nodesize for each outstanding extent + * + * This is overestimating in most cases. 
+ */ + qgroup_rsv_size = outstanding_extents * fs_info->nodesize; spin_lock(&block_rsv->lock); block_rsv->size = reserve_size; + block_rsv->qgroup_rsv_size = qgroup_rsv_size; spin_unlock(&block_rsv->lock); } @@ -8403,7 +8451,7 @@ static void unuse_block_rsv(struct btrfs_fs_info *fs_info, struct btrfs_block_rsv *block_rsv, u32 blocksize) { block_rsv_add_bytes(block_rsv, blocksize, 0); - block_rsv_release_bytes(fs_info, block_rsv, NULL, 0); + block_rsv_release_bytes(fs_info, block_rsv, NULL, 0, NULL); } /* diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 0167a9c97c9c..f660ba1e5e58 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1748,7 +1748,7 @@ again: unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, &cached_state); btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes, - (ret != 0)); + true); if (ret) { btrfs_drop_pages(pages, num_pages); break; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index e064c49c9a9a..d241285a0d2a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -31,6 +31,7 @@ #include <linux/uio.h> #include <linux/magic.h> #include <linux/iversion.h> +#include <asm/unaligned.h> #include "ctree.h" #include "disk-io.h" #include "transaction.h" @@ -5905,11 +5906,13 @@ static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx) struct dir_entry *entry = addr; char *name = (char *)(entry + 1); - ctx->pos = entry->offset; - if (!dir_emit(ctx, name, entry->name_len, entry->ino, - entry->type)) + ctx->pos = get_unaligned(&entry->offset); + if (!dir_emit(ctx, name, get_unaligned(&entry->name_len), + get_unaligned(&entry->ino), + get_unaligned(&entry->type))) return 1; - addr += sizeof(struct dir_entry) + entry->name_len; + addr += sizeof(struct dir_entry) + + get_unaligned(&entry->name_len); ctx->pos++; } return 0; @@ -5999,14 +6002,15 @@ again: } entry = addr; - entry->name_len = name_len; + put_unaligned(name_len, &entry->name_len); name_ptr = (char *)(entry + 1); read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1), name_len); - entry->type = btrfs_filetype_table[btrfs_dir_type(leaf, di)]; + put_unaligned(btrfs_filetype_table[btrfs_dir_type(leaf, di)], + &entry->type); btrfs_dir_item_key_to_cpu(leaf, di, &location); - entry->ino = location.objectid; - entry->offset = found_key.offset; + put_unaligned(location.objectid, &entry->ino); + put_unaligned(found_key.offset, &entry->offset); entries++; addr += sizeof(struct dir_entry) + name_len; total_len += sizeof(struct dir_entry) + name_len; diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index 124276bba8cf..21a831d3d087 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c @@ -189,9 +189,10 @@ void btrfs_print_leaf(struct extent_buffer *l) fs_info = l->fs_info; nr = btrfs_header_nritems(l); - btrfs_info(fs_info, "leaf %llu total ptrs %d free space %d", - btrfs_header_bytenr(l), nr, - btrfs_leaf_free_space(fs_info, l)); + btrfs_info(fs_info, + "leaf %llu gen %llu total ptrs %d free space %d owner %llu", + btrfs_header_bytenr(l), btrfs_header_generation(l), nr, + btrfs_leaf_free_space(fs_info, l), btrfs_header_owner(l)); for (i = 0 ; i < nr ; i++) { item = btrfs_item_nr(i); btrfs_item_key_to_cpu(l, &key, i); @@ -325,7 +326,7 @@ void btrfs_print_leaf(struct extent_buffer *l) } } -void btrfs_print_tree(struct extent_buffer *c) +void btrfs_print_tree(struct extent_buffer *c, bool follow) { struct btrfs_fs_info *fs_info; int i; u32 nr; @@ -342,15 +343,19 @@ void btrfs_print_tree(struct extent_buffer *c) return; } btrfs_info(fs_info, - "node %llu level %d total 
ptrs %d free spc %u", - btrfs_header_bytenr(c), level, nr, - (u32)BTRFS_NODEPTRS_PER_BLOCK(fs_info) - nr); + "node %llu level %d gen %llu total ptrs %d free spc %u owner %llu", + btrfs_header_bytenr(c), level, btrfs_header_generation(c), + nr, (u32)BTRFS_NODEPTRS_PER_BLOCK(fs_info) - nr, + btrfs_header_owner(c)); for (i = 0; i < nr; i++) { btrfs_node_key_to_cpu(c, &key, i); - pr_info("\tkey %d (%llu %u %llu) block %llu\n", + pr_info("\tkey %d (%llu %u %llu) block %llu gen %llu\n", i, key.objectid, key.type, key.offset, - btrfs_node_blockptr(c, i)); + btrfs_node_blockptr(c, i), + btrfs_node_ptr_generation(c, i)); } + if (!follow) + return; for (i = 0; i < nr; i++) { struct btrfs_key first_key; struct extent_buffer *next; @@ -372,7 +377,7 @@ void btrfs_print_tree(struct extent_buffer *c) if (btrfs_header_level(next) != level - 1) BUG(); - btrfs_print_tree(next); + btrfs_print_tree(next, follow); free_extent_buffer(next); } } diff --git a/fs/btrfs/print-tree.h b/fs/btrfs/print-tree.h index 4a98481688f4..e6bb38fd75ad 100644 --- a/fs/btrfs/print-tree.h +++ b/fs/btrfs/print-tree.h @@ -7,6 +7,6 @@ #define BTRFS_PRINT_TREE_H void btrfs_print_leaf(struct extent_buffer *l); -void btrfs_print_tree(struct extent_buffer *c); +void btrfs_print_tree(struct extent_buffer *c, bool follow); #endif diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 09c7e4fd550f..9fb758d5077a 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -11,6 +11,7 @@ #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/btrfs.h> +#include <linux/sizes.h> #include "ctree.h" #include "transaction.h" @@ -2375,8 +2376,21 @@ out: return ret; } -static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes) +/* + * Two limits to commit transaction in advance. + * + * For RATIO, it will be 1/RATIO of the remaining limit + * (excluding data and prealloc meta) as threshold. + * For SIZE, it will be in byte unit as threshold. + */ +#define QGROUP_PERTRANS_RATIO 32 +#define QGROUP_PERTRANS_SIZE SZ_32M +static bool qgroup_check_limits(struct btrfs_fs_info *fs_info, + const struct btrfs_qgroup *qg, u64 num_bytes) { + u64 limit; + u64 threshold; + if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) && qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer) return false; @@ -2385,6 +2399,31 @@ static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes) qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl) return false; + /* + * Even if we passed the check, it's better to check if reservation + * for meta_pertrans is pushing us near limit. + * If there is too much pertrans reservation or it's near the limit, + * let's try commit transaction to free some, using transaction_kthread + */ + if ((qg->lim_flags & (BTRFS_QGROUP_LIMIT_MAX_RFER | + BTRFS_QGROUP_LIMIT_MAX_EXCL))) { + if (qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) + limit = qg->max_excl; + else + limit = qg->max_rfer; + threshold = (limit - qg->rsv.values[BTRFS_QGROUP_RSV_DATA] - + qg->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC]) / + QGROUP_PERTRANS_RATIO; + threshold = min_t(u64, threshold, QGROUP_PERTRANS_SIZE); + + /* + * Use transaction_kthread to commit transaction, so we no + * longer need to bother nested transaction nor lock context. 
+ */ + if (qg->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS] > threshold) + btrfs_commit_transaction_locksafe(fs_info); + } + return true; } @@ -2434,7 +2473,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce, qg = unode_aux_to_qgroup(unode); - if (enforce && !qgroup_check_limits(qg, num_bytes)) { + if (enforce && !qgroup_check_limits(fs_info, qg, num_bytes)) { ret = -EDQUOT; goto out; } diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 00b7d3231821..b041b945a7ae 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1841,7 +1841,7 @@ again: old_bytenr = btrfs_node_blockptr(parent, slot); blocksize = fs_info->nodesize; old_ptr_gen = btrfs_node_ptr_generation(parent, slot); - btrfs_node_key_to_cpu(parent, &key, slot); + btrfs_node_key_to_cpu(parent, &first_key, slot); if (level <= max_level) { eb = path->nodes[level]; diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 221e5cdb060b..c0074d2d7d6d 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -5236,6 +5236,10 @@ static int send_write_or_clone(struct send_ctx *sctx, len = btrfs_file_extent_num_bytes(path->nodes[0], ei); } + if (offset >= sctx->cur_inode_size) { + ret = 0; + goto out; + } if (offset + len > sctx->cur_inode_size) len = sctx->cur_inode_size - offset; if (len == 0) { diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 63fdcab64b01..c944b4769e3c 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -2267,6 +2267,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) */ cur_trans->state = TRANS_STATE_COMPLETED; wake_up(&cur_trans->commit_wait); + clear_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags); spin_lock(&fs_info->trans_lock); list_del_init(&cur_trans->list); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index c88fccd80bc5..d8c0826bc2c7 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -199,6 +199,20 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root); int btrfs_commit_transaction(struct btrfs_trans_handle *trans); int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, int wait_for_unblock); + +/* + * Try to commit transaction asynchronously, so this is safe to call + * even holding a spinlock. + * + * It's done by informing transaction_kthread to commit transaction without + * waiting for commit interval. + */ +static inline void btrfs_commit_transaction_locksafe( + struct btrfs_fs_info *fs_info) +{ + set_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags); + wake_up_process(fs_info->transaction_kthread); +} int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans); int btrfs_should_end_transaction(struct btrfs_trans_handle *trans); void btrfs_throttle(struct btrfs_fs_info *fs_info); diff --git a/fs/ceph/file.c b/fs/ceph/file.c index f85040d73e3d..cf0e45b10121 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -70,69 +70,104 @@ static __le32 ceph_flags_sys2wire(u32 flags) */ /* - * Calculate the length sum of direct io vectors that can - * be combined into one page vector. + * How many pages to get in one call to iov_iter_get_pages(). This + * determines the size of the on-stack array used as a buffer. 
*/ -static size_t dio_get_pagev_size(const struct iov_iter *it) +#define ITER_GET_BVECS_PAGES 64 + +static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize, + struct bio_vec *bvecs) { - const struct iovec *iov = it->iov; - const struct iovec *iovend = iov + it->nr_segs; - size_t size; - - size = iov->iov_len - it->iov_offset; - /* - * An iov can be page vectored when both the current tail - * and the next base are page aligned. - */ - while (PAGE_ALIGNED((iov->iov_base + iov->iov_len)) && - (++iov < iovend && PAGE_ALIGNED((iov->iov_base)))) { - size += iov->iov_len; - } - dout("dio_get_pagevlen len = %zu\n", size); - return size; + size_t size = 0; + int bvec_idx = 0; + + if (maxsize > iov_iter_count(iter)) + maxsize = iov_iter_count(iter); + + while (size < maxsize) { + struct page *pages[ITER_GET_BVECS_PAGES]; + ssize_t bytes; + size_t start; + int idx = 0; + + bytes = iov_iter_get_pages(iter, pages, maxsize - size, + ITER_GET_BVECS_PAGES, &start); + if (bytes < 0) + return size ?: bytes; + + iov_iter_advance(iter, bytes); + size += bytes; + + for ( ; bytes; idx++, bvec_idx++) { + struct bio_vec bv = { + .bv_page = pages[idx], + .bv_len = min_t(int, bytes, PAGE_SIZE - start), + .bv_offset = start, + }; + + bvecs[bvec_idx] = bv; + bytes -= bv.bv_len; + start = 0; + } + } + + return size; } /* - * Allocate a page vector based on (@it, @nbytes). - * The return value is the tuple describing a page vector, - * that is (@pages, @page_align, @num_pages). + * iov_iter_get_pages() only considers one iov_iter segment, no matter + * what maxsize or maxpages are given. For ITER_BVEC that is a single + * page. + * + * Attempt to get up to @maxsize bytes worth of pages from @iter. + * Return the number of bytes in the created bio_vec array, or an error. */ -static struct page ** -dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes, - size_t *page_align, int *num_pages) +static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize, + struct bio_vec **bvecs, int *num_bvecs) { - struct iov_iter tmp_it = *it; - size_t align; - struct page **pages; - int ret = 0, idx, npages; + struct bio_vec *bv; + size_t orig_count = iov_iter_count(iter); + ssize_t bytes; + int npages; - align = (unsigned long)(it->iov->iov_base + it->iov_offset) & - (PAGE_SIZE - 1); - npages = calc_pages_for(align, nbytes); - pages = kvmalloc(sizeof(*pages) * npages, GFP_KERNEL); - if (!pages) - return ERR_PTR(-ENOMEM); + iov_iter_truncate(iter, maxsize); + npages = iov_iter_npages(iter, INT_MAX); + iov_iter_reexpand(iter, orig_count); - for (idx = 0; idx < npages; ) { - size_t start; - ret = iov_iter_get_pages(&tmp_it, pages + idx, nbytes, - npages - idx, &start); - if (ret < 0) - goto fail; + /* + * __iter_get_bvecs() may populate only part of the array -- zero it + * out. + */ + bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO); + if (!bv) + return -ENOMEM; - iov_iter_advance(&tmp_it, ret); - nbytes -= ret; - idx += (ret + start + PAGE_SIZE - 1) / PAGE_SIZE; + bytes = __iter_get_bvecs(iter, maxsize, bv); + if (bytes < 0) { + /* + * No pages were pinned -- just free the array. 
+ */ + kvfree(bv); + return bytes; } - BUG_ON(nbytes != 0); - *num_pages = npages; - *page_align = align; - dout("dio_get_pages_alloc: got %d pages align %zu\n", npages, align); - return pages; -fail: - ceph_put_page_vector(pages, idx, false); - return ERR_PTR(ret); + *bvecs = bv; + *num_bvecs = npages; + return bytes; +} + +static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty) +{ + int i; + + for (i = 0; i < num_bvecs; i++) { + if (bvecs[i].bv_page) { + if (should_dirty) + set_page_dirty_lock(bvecs[i].bv_page); + put_page(bvecs[i].bv_page); + } + } + kvfree(bvecs); } /* @@ -746,11 +781,12 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req) struct inode *inode = req->r_inode; struct ceph_aio_request *aio_req = req->r_priv; struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0); - int num_pages = calc_pages_for((u64)osd_data->alignment, - osd_data->length); - dout("ceph_aio_complete_req %p rc %d bytes %llu\n", - inode, rc, osd_data->length); + BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS); + BUG_ON(!osd_data->num_bvecs); + + dout("ceph_aio_complete_req %p rc %d bytes %u\n", + inode, rc, osd_data->bvec_pos.iter.bi_size); if (rc == -EOLDSNAPC) { struct ceph_aio_work *aio_work; @@ -768,9 +804,10 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req) } else if (!aio_req->write) { if (rc == -ENOENT) rc = 0; - if (rc >= 0 && osd_data->length > rc) { - int zoff = osd_data->alignment + rc; - int zlen = osd_data->length - rc; + if (rc >= 0 && osd_data->bvec_pos.iter.bi_size > rc) { + struct iov_iter i; + int zlen = osd_data->bvec_pos.iter.bi_size - rc; + /* * If read is satisfied by single OSD request, * it can pass EOF. Otherwise read is within @@ -785,13 +822,16 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req) aio_req->total_len = rc + zlen; } - if (zlen > 0) - ceph_zero_page_vector_range(zoff, zlen, - osd_data->pages); + iov_iter_bvec(&i, ITER_BVEC, osd_data->bvec_pos.bvecs, + osd_data->num_bvecs, + osd_data->bvec_pos.iter.bi_size); + iov_iter_advance(&i, rc); + iov_iter_zero(zlen, &i); } } - ceph_put_page_vector(osd_data->pages, num_pages, aio_req->should_dirty); + put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs, + aio_req->should_dirty); ceph_osdc_put_request(req); if (rc < 0) @@ -879,7 +919,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, struct ceph_fs_client *fsc = ceph_inode_to_client(inode); struct ceph_vino vino; struct ceph_osd_request *req; - struct page **pages; + struct bio_vec *bvecs; struct ceph_aio_request *aio_req = NULL; int num_pages = 0; int flags; @@ -914,10 +954,14 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, } while (iov_iter_count(iter) > 0) { - u64 size = dio_get_pagev_size(iter); - size_t start = 0; + u64 size = iov_iter_count(iter); ssize_t len; + if (write) + size = min_t(u64, size, fsc->mount_options->wsize); + else + size = min_t(u64, size, fsc->mount_options->rsize); + vino = ceph_vino(inode); req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino, pos, &size, 0, @@ -933,18 +977,14 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, break; } - if (write) - size = min_t(u64, size, fsc->mount_options->wsize); - else - size = min_t(u64, size, fsc->mount_options->rsize); - - len = size; - pages = dio_get_pages_alloc(iter, len, &start, &num_pages); - if (IS_ERR(pages)) { + len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages); + if (len < 0) { ceph_osdc_put_request(req); - ret = PTR_ERR(pages); + 
ret = len; break; } + if (len != size) + osd_req_op_extent_update(req, 0, len); /* * To simplify error handling, allow AIO when IO within i_size @@ -977,8 +1017,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, req->r_mtime = mtime; } - osd_req_op_extent_osd_data_pages(req, 0, pages, len, start, - false, false); + osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len); if (aio_req) { aio_req->total_len += len; @@ -991,7 +1030,6 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs); pos += len; - iov_iter_advance(iter, len); continue; } @@ -1004,25 +1042,26 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, if (ret == -ENOENT) ret = 0; if (ret >= 0 && ret < len && pos + ret < size) { + struct iov_iter i; int zlen = min_t(size_t, len - ret, size - pos - ret); - ceph_zero_page_vector_range(start + ret, zlen, - pages); + + iov_iter_bvec(&i, ITER_BVEC, bvecs, num_pages, + len); + iov_iter_advance(&i, ret); + iov_iter_zero(zlen, &i); ret += zlen; } if (ret >= 0) len = ret; } - ceph_put_page_vector(pages, num_pages, should_dirty); - + put_bvecs(bvecs, num_pages, should_dirty); ceph_osdc_put_request(req); if (ret < 0) break; pos += len; - iov_iter_advance(iter, len); - if (!write && pos >= size) break; diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 8bf60250309e..ae056927080d 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -669,13 +669,15 @@ void ceph_fill_file_time(struct inode *inode, int issued, CEPH_CAP_FILE_BUFFER| CEPH_CAP_AUTH_EXCL| CEPH_CAP_XATTR_EXCL)) { - if (timespec_compare(ctime, &inode->i_ctime) > 0) { + if (ci->i_version == 0 || + timespec_compare(ctime, &inode->i_ctime) > 0) { dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n", inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec, ctime->tv_sec, ctime->tv_nsec); inode->i_ctime = *ctime; } - if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) { + if (ci->i_version == 0 || + ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) { /* the MDS did a utimes() */ dout("mtime %ld.%09ld -> %ld.%09ld " "tw %d -> %d\n", @@ -795,7 +797,6 @@ static int fill_inode(struct inode *inode, struct page *locked_page, new_issued = ~issued & le32_to_cpu(info->cap.caps); /* update inode */ - ci->i_version = le64_to_cpu(info->version); inode->i_rdev = le32_to_cpu(info->rdev); inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1; @@ -868,6 +869,9 @@ static int fill_inode(struct inode *inode, struct page *locked_page, xattr_blob = NULL; } + /* finally update i_version */ + ci->i_version = le64_to_cpu(info->version); + inode->i_mapping->a_ops = &ceph_aops; switch (inode->i_mode & S_IFMT) { diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c index 7e72348639e4..315f7e63e7cc 100644 --- a/fs/ceph/xattr.c +++ b/fs/ceph/xattr.c @@ -228,7 +228,15 @@ static size_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val, static bool ceph_vxattrcb_quota_exists(struct ceph_inode_info *ci) { - return (ci->i_max_files || ci->i_max_bytes); + bool ret = false; + spin_lock(&ci->i_ceph_lock); + if ((ci->i_max_files || ci->i_max_bytes) && + ci->i_vino.snap == CEPH_NOSNAP && + ci->i_snap_realm && + ci->i_snap_realm->ino == ci->i_vino.ino) + ret = true; + spin_unlock(&ci->i_ceph_lock); + return ret; } static size_t ceph_vxattrcb_quota(struct ceph_inode_info *ci, char *val, @@ -1008,14 +1016,19 @@ int __ceph_setxattr(struct inode *inode, const char *name, char *newval = NULL; struct ceph_inode_xattr *xattr = NULL; int required_blob_size; 
+ bool check_realm = false; bool lock_snap_rwsem = false; if (ceph_snap(inode) != CEPH_NOSNAP) return -EROFS; vxattr = ceph_match_vxattr(inode, name); - if (vxattr && vxattr->readonly) - return -EOPNOTSUPP; + if (vxattr) { + if (vxattr->readonly) + return -EOPNOTSUPP; + if (value && !strncmp(vxattr->name, "ceph.quota", 10)) + check_realm = true; + } /* pass any unhandled ceph.* xattrs through to the MDS */ if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN)) @@ -1109,6 +1122,15 @@ do_sync_unlocked: err = -EBUSY; } else { err = ceph_sync_setxattr(inode, name, value, size, flags); + if (err >= 0 && check_realm) { + /* check if snaprealm was created for quota inode */ + spin_lock(&ci->i_ceph_lock); + if ((ci->i_max_files || ci->i_max_bytes) && + !(ci->i_snap_realm && + ci->i_snap_realm->ino == ci->i_vino.ino)) + err = -EOPNOTSUPP; + spin_unlock(&ci->i_ceph_lock); + } } out: ceph_free_cap_flush(prealloc_cf); diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig index 741749a98614..5f132d59dfc2 100644 --- a/fs/cifs/Kconfig +++ b/fs/cifs/Kconfig @@ -197,7 +197,7 @@ config CIFS_SMB311 config CIFS_SMB_DIRECT bool "SMB Direct support (Experimental)" - depends on CIFS=m && INFINIBAND || CIFS=y && INFINIBAND=y + depends on CIFS=m && INFINIBAND && INFINIBAND_ADDR_TRANS || CIFS=y && INFINIBAND=y && INFINIBAND_ADDR_TRANS=y help Enables SMB Direct experimental support for SMB 3.0, 3.02 and 3.1.1. SMB Direct allows transferring SMB packets over RDMA. If unsure, diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h index fe5567655662..0e74690d11bc 100644 --- a/fs/cifs/cifs_debug.h +++ b/fs/cifs/cifs_debug.h @@ -54,7 +54,7 @@ do { \ pr_debug_ ## ratefunc("%s: " \ fmt, __FILE__, ##__VA_ARGS__); \ } else if ((type) & VFS) { \ - pr_err_ ## ratefunc("CuIFS VFS: " \ + pr_err_ ## ratefunc("CIFS VFS: " \ fmt, ##__VA_ARGS__); \ } else if ((type) & NOISY && (NOISY != 0)) { \ pr_debug_ ## ratefunc(fmt, ##__VA_ARGS__); \ diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index f715609b13f3..5a5a0158cc8f 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -1047,6 +1047,18 @@ out: return rc; } +/* + * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync() + * is a dummy operation. 
+ */ +static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync) +{ + cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n", + file, datasync); + + return 0; +} + static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off, struct file *dst_file, loff_t destoff, size_t len, unsigned int flags) @@ -1181,6 +1193,7 @@ const struct file_operations cifs_dir_ops = { .copy_file_range = cifs_copy_file_range, .clone_file_range = cifs_clone_file_range, .llseek = generic_file_llseek, + .fsync = cifs_dir_fsync, }; static void diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 6d3e40d7029c..1529a088383d 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -455,6 +455,9 @@ cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required) server->sign = true; } + if (cifs_rdma_enabled(server) && server->sign) + cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled"); + return 0; } diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index e8830f076a7f..7a10a5d0731f 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1977,14 +1977,6 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, goto cifs_parse_mount_err; } -#ifdef CONFIG_CIFS_SMB_DIRECT - if (vol->rdma && vol->sign) { - cifs_dbg(VFS, "Currently SMB direct doesn't support signing." - " This is being fixed\n"); - goto cifs_parse_mount_err; - } -#endif - #ifndef CONFIG_KEYS /* Muliuser mounts require CONFIG_KEYS support */ if (vol->multiuser) { @@ -2959,6 +2951,22 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info) } } + if (volume_info->seal) { + if (ses->server->vals->protocol_id == 0) { + cifs_dbg(VFS, + "SMB3 or later required for encryption\n"); + rc = -EOPNOTSUPP; + goto out_fail; + } else if (tcon->ses->server->capabilities & + SMB2_GLOBAL_CAP_ENCRYPTION) + tcon->seal = true; + else { + cifs_dbg(VFS, "Encryption is not supported on share\n"); + rc = -EOPNOTSUPP; + goto out_fail; + } + } + /* * BB Do we need to wrap session_mutex around this TCon call and Unix * SetFS as we do on SessSetup and reconnect? 
@@ -3007,22 +3015,6 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info) tcon->use_resilient = true; } - if (volume_info->seal) { - if (ses->server->vals->protocol_id == 0) { - cifs_dbg(VFS, - "SMB3 or later required for encryption\n"); - rc = -EOPNOTSUPP; - goto out_fail; - } else if (tcon->ses->server->capabilities & - SMB2_GLOBAL_CAP_ENCRYPTION) - tcon->seal = true; - else { - cifs_dbg(VFS, "Encryption is not supported on share\n"); - rc = -EOPNOTSUPP; - goto out_fail; - } - } - /* * We can have only one retry value for a connection to a share so for * resources mounted more than once to the same server share the last diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 81ba6e0d88d8..925844343038 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -684,6 +684,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode, goto mknod_out; } + if (!S_ISCHR(mode) && !S_ISBLK(mode)) + goto mknod_out; + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)) goto mknod_out; @@ -692,10 +695,8 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode, buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); if (buf == NULL) { - kfree(full_path); rc = -ENOMEM; - free_xid(xid); - return rc; + goto mknod_out; } if (backup_cred(cifs_sb)) @@ -742,7 +743,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode, pdev->minor = cpu_to_le64(MINOR(device_number)); rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms, &bytes_written, iov, 1); - } /* else if (S_ISFIFO) */ + } tcon->ses->server->ops->close(xid, tcon, &fid); d_drop(direntry); diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 4bcd4e838b47..23fd430fe74a 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -3462,7 +3462,7 @@ cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset) * If the page is mmap'ed into a process' page tables, then we need to make * sure that it doesn't change while being written back. */ -static int +static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf) { struct page *page = vmf->page; diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index b4ae932ea134..9c6d95ffca97 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -252,9 +252,14 @@ smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info) wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE; wsize = min_t(unsigned int, wsize, server->max_write); #ifdef CONFIG_CIFS_SMB_DIRECT - if (server->rdma) - wsize = min_t(unsigned int, + if (server->rdma) { + if (server->sign) + wsize = min_t(unsigned int, + wsize, server->smbd_conn->max_fragmented_send_size); + else + wsize = min_t(unsigned int, wsize, server->smbd_conn->max_readwrite_size); + } #endif if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE); @@ -272,9 +277,14 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info) rsize = volume_info->rsize ? 
volume_info->rsize : CIFS_DEFAULT_IOSIZE; rsize = min_t(unsigned int, rsize, server->max_read); #ifdef CONFIG_CIFS_SMB_DIRECT - if (server->rdma) - rsize = min_t(unsigned int, + if (server->rdma) { + if (server->sign) + rsize = min_t(unsigned int, + rsize, server->smbd_conn->max_fragmented_recv_size); + else + rsize = min_t(unsigned int, rsize, server->smbd_conn->max_readwrite_size); + } #endif if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) @@ -579,9 +589,15 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon, SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); + /* + * If ea_name is NULL (listxattr) and there are no EAs, return 0 as it's + * not an error. Otherwise, the specified ea_name was not found. + */ if (!rc) rc = move_smb2_ea_to_cifs(ea_data, buf_size, smb2_data, SMB2_MAX_EA_BUF, ea_name); + else if (!ea_name && rc == -ENODATA) + rc = 0; kfree(smb2_data); return rc; @@ -1452,7 +1468,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_open_parms oparms; struct cifs_fid fid; struct kvec err_iov = {NULL, 0}; - struct smb2_err_rsp *err_buf = NULL; + struct smb2_err_rsp *err_buf; struct smb2_symlink_err_rsp *symlink; unsigned int sub_len; unsigned int sub_offset; @@ -1476,7 +1492,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, &err_iov); - if (!rc || !err_buf) { + if (!rc || !err_iov.iov_base) { kfree(utf16_path); return -ENOENT; } diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 0f044c4a2dc9..0f48741a0130 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -383,10 +383,10 @@ static void build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt) { pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES; - pneg_ctxt->DataLength = cpu_to_le16(6); - pneg_ctxt->CipherCount = cpu_to_le16(2); - pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM; - pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES128_CCM; + pneg_ctxt->DataLength = cpu_to_le16(4); /* Cipher Count + le16 cipher */ + pneg_ctxt->CipherCount = cpu_to_le16(1); +/* pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;*/ /* not supported yet */ + pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_CCM; } static void @@ -444,6 +444,7 @@ static int decode_encrypt_ctx(struct TCP_Server_Info *server, return -EINVAL; } server->cipher_type = ctxt->Ciphers[0]; + server->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION; return 0; } @@ -729,19 +730,14 @@ neg_exit: int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) { - int rc = 0; - struct validate_negotiate_info_req vneg_inbuf; + int rc; + struct validate_negotiate_info_req *pneg_inbuf; struct validate_negotiate_info_rsp *pneg_rsp = NULL; u32 rsplen; u32 inbuflen; /* max of 4 dialects */ cifs_dbg(FYI, "validate negotiate\n"); -#ifdef CONFIG_CIFS_SMB_DIRECT - if (tcon->ses->server->rdma) - return 0; -#endif - /* In SMB3.11 preauth integrity supersedes validate negotiate */ if (tcon->ses->server->dialect == SMB311_PROT_ID) return 0; @@ -764,63 +760,69 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL) cifs_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n"); - vneg_inbuf.Capabilities = + pneg_inbuf = kmalloc(sizeof(*pneg_inbuf), GFP_NOFS); + if (!pneg_inbuf) + return -ENOMEM; + + pneg_inbuf->Capabilities = cpu_to_le32(tcon->ses->server->vals->req_capabilities); - memcpy(vneg_inbuf.Guid, tcon->ses->server->client_guid, + 
memcpy(pneg_inbuf->Guid, tcon->ses->server->client_guid, SMB2_CLIENT_GUID_SIZE); if (tcon->ses->sign) - vneg_inbuf.SecurityMode = + pneg_inbuf->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED); else if (global_secflags & CIFSSEC_MAY_SIGN) - vneg_inbuf.SecurityMode = + pneg_inbuf->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED); else - vneg_inbuf.SecurityMode = 0; + pneg_inbuf->SecurityMode = 0; if (strcmp(tcon->ses->server->vals->version_string, SMB3ANY_VERSION_STRING) == 0) { - vneg_inbuf.Dialects[0] = cpu_to_le16(SMB30_PROT_ID); - vneg_inbuf.Dialects[1] = cpu_to_le16(SMB302_PROT_ID); - vneg_inbuf.DialectCount = cpu_to_le16(2); + pneg_inbuf->Dialects[0] = cpu_to_le16(SMB30_PROT_ID); + pneg_inbuf->Dialects[1] = cpu_to_le16(SMB302_PROT_ID); + pneg_inbuf->DialectCount = cpu_to_le16(2); /* structure is big enough for 3 dialects, sending only 2 */ - inbuflen = sizeof(struct validate_negotiate_info_req) - 2; + inbuflen = sizeof(*pneg_inbuf) - + sizeof(pneg_inbuf->Dialects[0]); } else if (strcmp(tcon->ses->server->vals->version_string, SMBDEFAULT_VERSION_STRING) == 0) { - vneg_inbuf.Dialects[0] = cpu_to_le16(SMB21_PROT_ID); - vneg_inbuf.Dialects[1] = cpu_to_le16(SMB30_PROT_ID); - vneg_inbuf.Dialects[2] = cpu_to_le16(SMB302_PROT_ID); - vneg_inbuf.DialectCount = cpu_to_le16(3); + pneg_inbuf->Dialects[0] = cpu_to_le16(SMB21_PROT_ID); + pneg_inbuf->Dialects[1] = cpu_to_le16(SMB30_PROT_ID); + pneg_inbuf->Dialects[2] = cpu_to_le16(SMB302_PROT_ID); + pneg_inbuf->DialectCount = cpu_to_le16(3); /* structure is big enough for 3 dialects */ - inbuflen = sizeof(struct validate_negotiate_info_req); + inbuflen = sizeof(*pneg_inbuf); } else { /* otherwise specific dialect was requested */ - vneg_inbuf.Dialects[0] = + pneg_inbuf->Dialects[0] = cpu_to_le16(tcon->ses->server->vals->protocol_id); - vneg_inbuf.DialectCount = cpu_to_le16(1); + pneg_inbuf->DialectCount = cpu_to_le16(1); /* structure is big enough for 3 dialects, sending only 1 */ - inbuflen = sizeof(struct validate_negotiate_info_req) - 4; + inbuflen = sizeof(*pneg_inbuf) - + sizeof(pneg_inbuf->Dialects[0]) * 2; } rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */, - (char *)&vneg_inbuf, sizeof(struct validate_negotiate_info_req), - (char **)&pneg_rsp, &rsplen); + (char *)pneg_inbuf, inbuflen, (char **)&pneg_rsp, &rsplen); if (rc != 0) { cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc); - return -EIO; + rc = -EIO; + goto out_free_inbuf; } - if (rsplen != sizeof(struct validate_negotiate_info_rsp)) { + rc = -EIO; + if (rsplen != sizeof(*pneg_rsp)) { cifs_dbg(VFS, "invalid protocol negotiate response size: %d\n", rsplen); /* relax check since Mac returns max bufsize allowed on ioctl */ - if ((rsplen > CIFSMaxBufSize) - || (rsplen < sizeof(struct validate_negotiate_info_rsp))) - goto err_rsp_free; + if (rsplen > CIFSMaxBufSize || rsplen < sizeof(*pneg_rsp)) + goto out_free_rsp; } /* check validate negotiate info response matches what we got earlier */ @@ -837,15 +839,17 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) goto vneg_out; /* validate negotiate successful */ + rc = 0; cifs_dbg(FYI, "validate negotiate info successful\n"); - kfree(pneg_rsp); - return 0; + goto out_free_rsp; vneg_out: cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n"); -err_rsp_free: +out_free_rsp: kfree(pneg_rsp); - return -EIO; +out_free_inbuf: + kfree(pneg_inbuf); + return rc; } enum securityEnum @@ -2590,7 +2594,7 @@ smb2_new_read_req(void 
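The rewrite above moves the validate-negotiate request off the stack and unwinds through labelled cleanup on every exit path. The allocate-then-unwind shape, reduced to a standalone userspace sketch (malloc stands in for kmalloc; the size check and names are made up):

#include <errno.h>
#include <stdlib.h>

static int issue_request(size_t req_len, size_t rsp_len)
{
	int rc = 0;
	char *inbuf, *rsp;

	inbuf = malloc(req_len);
	if (!inbuf)
		return -ENOMEM;

	rsp = malloc(rsp_len);			/* stands in for the ioctl reply buffer */
	if (!rsp) {
		rc = -ENOMEM;
		goto out_free_inbuf;
	}

	if (rsp_len < 24) {			/* stand-in for the response size sanity check */
		rc = -EIO;
		goto out_free_rsp;
	}

	/* ... fill inbuf, send it, compare rsp against the negotiated values ... */

out_free_rsp:
	free(rsp);
out_free_inbuf:
	free(inbuf);
	return rc;
}

The success path simply falls through both labels, so every return funnels through the same two cleanups.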
**buf, unsigned int *total_len, * If we want to do a RDMA write, fill in and append * smbd_buffer_descriptor_v1 to the end of read request */ - if (server->rdma && rdata && + if (server->rdma && rdata && !server->sign && rdata->bytes >= server->smbd_conn->rdma_readwrite_threshold) { struct smbd_buffer_descriptor_v1 *v1; @@ -2968,7 +2972,7 @@ smb2_async_writev(struct cifs_writedata *wdata, * If we want to do a server RDMA read, fill in and append * smbd_buffer_descriptor_v1 to the end of write request */ - if (server->rdma && wdata->bytes >= + if (server->rdma && !server->sign && wdata->bytes >= server->smbd_conn->rdma_readwrite_threshold) { struct smbd_buffer_descriptor_v1 *v1; diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index 6093e5142b2b..d28f358022c5 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h @@ -297,7 +297,7 @@ struct smb2_encryption_neg_context { __le16 DataLength; __le32 Reserved; __le16 CipherCount; /* AES-128-GCM and AES-128-CCM */ - __le16 Ciphers[2]; /* Ciphers[0] since only one used now */ + __le16 Ciphers[1]; /* Ciphers[0] since only one used now */ } __packed; struct smb2_negotiate_rsp { diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c index 5008af546dd1..c62f7c95683c 100644 --- a/fs/cifs/smbdirect.c +++ b/fs/cifs/smbdirect.c @@ -1028,7 +1028,7 @@ static int smbd_post_send(struct smbd_connection *info, for (i = 0; i < request->num_sge; i++) { log_rdma_send(INFO, "rdma_request sge[%d] addr=%llu length=%u\n", - i, request->sge[0].addr, request->sge[0].length); + i, request->sge[i].addr, request->sge[i].length); ib_dma_sync_single_for_device( info->id->device, request->sge[i].addr, @@ -2086,7 +2086,7 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst) int start, i, j; int max_iov_size = info->max_send_size - sizeof(struct smbd_data_transfer); - struct kvec iov[SMBDIRECT_MAX_SGE]; + struct kvec *iov; int rc; info->smbd_send_pending++; @@ -2096,32 +2096,20 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst) } /* - * This usually means a configuration error - * We use RDMA read/write for packet size > rdma_readwrite_threshold - * as long as it's properly configured we should never get into this - * situation - */ - if (rqst->rq_nvec + rqst->rq_npages > SMBDIRECT_MAX_SGE) { - log_write(ERR, "maximum send segment %x exceeding %x\n", - rqst->rq_nvec + rqst->rq_npages, SMBDIRECT_MAX_SGE); - rc = -EINVAL; - goto done; - } - - /* - * Remove the RFC1002 length defined in MS-SMB2 section 2.1 - * It is used only for TCP transport + * Skip the RFC1002 length defined in MS-SMB2 section 2.1 + * It is used only for TCP transport in the iov[0] * In future we may want to add a transport layer under protocol * layer so this will only be issued to TCP transport */ - iov[0].iov_base = (char *)rqst->rq_iov[0].iov_base + 4; - iov[0].iov_len = rqst->rq_iov[0].iov_len - 4; - buflen += iov[0].iov_len; + + if (rqst->rq_iov[0].iov_len != 4) { + log_write(ERR, "expected the pdu length in 1st iov, but got %zu\n", rqst->rq_iov[0].iov_len); + return -EINVAL; + } + iov = &rqst->rq_iov[1]; /* total up iov array first */ - for (i = 1; i < rqst->rq_nvec; i++) { - iov[i].iov_base = rqst->rq_iov[i].iov_base; - iov[i].iov_len = rqst->rq_iov[i].iov_len; + for (i = 0; i < rqst->rq_nvec-1; i++) { buflen += iov[i].iov_len; } @@ -2139,6 +2127,10 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst) goto done; } + cifs_dbg(FYI, "Sending smb (RDMA): smb_len=%u\n", buflen); + for (i = 0; i < rqst->rq_nvec-1; i++) + dump_smb(iov[i].iov_base, 
iov[i].iov_len); + remaining_data_length = buflen; log_write(INFO, "rqst->rq_nvec=%d rqst->rq_npages=%d rq_pagesz=%d " @@ -2194,12 +2186,14 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst) goto done; } i++; + if (i == rqst->rq_nvec-1) + break; } start = i; buflen = 0; } else { i++; - if (i == rqst->rq_nvec) { + if (i == rqst->rq_nvec-1) { /* send out all remaining vecs */ remaining_data_length -= buflen; log_write(INFO, diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 8f6f25918229..927226a2122f 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -753,7 +753,7 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses, goto out; #ifdef CONFIG_CIFS_SMB311 - if (ses->status == CifsNew) + if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) smb311_update_preauth_hash(ses, rqst->rq_iov+1, rqst->rq_nvec-1); #endif @@ -798,7 +798,7 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses, *resp_buf_type = CIFS_SMALL_BUFFER; #ifdef CONFIG_CIFS_SMB311 - if (ses->status == CifsNew) { + if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) { struct kvec iov = { .iov_base = buf + 4, .iov_len = get_rfc1002_length(buf) @@ -834,8 +834,11 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses, if (n_vec + 1 > CIFS_MAX_IOV_SIZE) { new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1), GFP_KERNEL); - if (!new_iov) + if (!new_iov) { + /* otherwise cifs_send_recv below sets resp_buf_type */ + *resp_buf_type = CIFS_NO_BUFFER; return -ENOMEM; + } } else new_iov = s_iov; diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index 846ca150d52e..4dd842f72846 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c @@ -1997,6 +1997,16 @@ out: return rc; } +static bool is_dot_dotdot(const char *name, size_t name_size) +{ + if (name_size == 1 && name[0] == '.') + return true; + else if (name_size == 2 && name[0] == '.' 
&& name[1] == '.') + return true; + + return false; +} + /** * ecryptfs_decode_and_decrypt_filename - converts the encoded cipher text name to decoded plaintext * @plaintext_name: The plaintext name @@ -2021,13 +2031,21 @@ int ecryptfs_decode_and_decrypt_filename(char **plaintext_name, size_t packet_size; int rc = 0; - if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) - && !(mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) - && (name_size > ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE) - && (strncmp(name, ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX, - ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE) == 0)) { - const char *orig_name = name; - size_t orig_name_size = name_size; + if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) && + !(mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)) { + if (is_dot_dotdot(name, name_size)) { + rc = ecryptfs_copy_filename(plaintext_name, + plaintext_name_size, + name, name_size); + goto out; + } + + if (name_size <= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE || + strncmp(name, ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX, + ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE)) { + rc = -EINVAL; + goto out; + } name += ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE; name_size -= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE; @@ -2047,12 +2065,9 @@ int ecryptfs_decode_and_decrypt_filename(char **plaintext_name, decoded_name, decoded_name_size); if (rc) { - printk(KERN_INFO "%s: Could not parse tag 70 packet " - "from filename; copying through filename " - "as-is\n", __func__); - rc = ecryptfs_copy_filename(plaintext_name, - plaintext_name_size, - orig_name, orig_name_size); + ecryptfs_printk(KERN_DEBUG, + "%s: Could not parse tag 70 packet from filename\n", + __func__); goto out_free; } } else { diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index c74ed3ca3372..b76a9853325e 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c @@ -82,17 +82,28 @@ ecryptfs_filldir(struct dir_context *ctx, const char *lower_name, buf->sb, lower_name, lower_namelen); if (rc) { - printk(KERN_ERR "%s: Error attempting to decode and decrypt " - "filename [%s]; rc = [%d]\n", __func__, lower_name, - rc); - goto out; + if (rc != -EINVAL) { + ecryptfs_printk(KERN_DEBUG, + "%s: Error attempting to decode and decrypt filename [%s]; rc = [%d]\n", + __func__, lower_name, rc); + return rc; + } + + /* Mask -EINVAL errors as these are most likely due a plaintext + * filename present in the lower filesystem despite filename + * encryption being enabled. One unavoidable example would be + * the "lost+found" dentry in the root directory of an Ext4 + * filesystem. 
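With the changes above, readdir passes "." and ".." through untouched and rejects lower names that lack the encrypted-filename prefix instead of copying them through as-is. The decision collapses into one small predicate; a sketch with an illustrative prefix string and return convention (not the exact kernel constants):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

#define ENC_PREFIX "ECRYPTFS_FNEK_ENCRYPTED."	/* illustrative prefix */

static bool is_dot_dotdot(const char *name, size_t name_size)
{
	return (name_size == 1 && name[0] == '.') ||
	       (name_size == 2 && name[0] == '.' && name[1] == '.');
}

/* 1: copy through unchanged, 0: decrypt, -1: not a valid encrypted name */
static int classify_lower_name(const char *name, size_t name_size)
{
	size_t plen = strlen(ENC_PREFIX);

	if (is_dot_dotdot(name, name_size))
		return 1;
	if (name_size <= plen || strncmp(name, ENC_PREFIX, plen) != 0)
		return -1;
	return 0;
}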
+ */ + return 0; } + buf->caller->pos = buf->ctx.pos; rc = !dir_emit(buf->caller, name, name_size, ino, d_type); kfree(name); if (!rc) buf->entries_written++; -out: + return rc; } diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 847904aa63a9..97d17eaeba07 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -395,8 +395,7 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, mount_crypt_stat = &ecryptfs_superblock_to_private( ecryptfs_dentry->d_sb)->mount_crypt_stat; - if (mount_crypt_stat - && (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)) { + if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) { rc = ecryptfs_encrypt_and_encode_filename( &encrypted_and_encoded_name, &len, mount_crypt_stat, name, len); diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index c89a58cfc991..e74fe84d0886 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c @@ -1880,7 +1880,7 @@ find_next_matching_auth_tok: candidate_auth_tok = &auth_tok_list_item->auth_tok; if (unlikely(ecryptfs_verbosity > 0)) { ecryptfs_printk(KERN_DEBUG, - "Considering cadidate auth tok:\n"); + "Considering candidate auth tok:\n"); ecryptfs_dump_auth_tok(candidate_auth_tok); } rc = ecryptfs_get_auth_tok_sig(&candidate_auth_tok_sig, diff --git a/fs/ext2/file.c b/fs/ext2/file.c index 09640220fda8..047c327a6b23 100644 --- a/fs/ext2/file.c +++ b/fs/ext2/file.c @@ -88,11 +88,11 @@ out_unlock: * The default page_lock and i_size verification done by non-DAX fault paths * is sufficient because ext2 doesn't support hole punching. */ -static int ext2_dax_fault(struct vm_fault *vmf) +static vm_fault_t ext2_dax_fault(struct vm_fault *vmf) { struct inode *inode = file_inode(vmf->vma->vm_file); struct ext2_inode_info *ei = EXT2_I(inode); - int ret; + vm_fault_t ret; if (vmf->flags & FAULT_FLAG_WRITE) { sb_start_pagefault(inode->i_sb); diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index a33d8fb1bf2a..508b905d744d 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -321,6 +321,7 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb, struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_grpblk_t offset; ext4_grpblk_t next_zero_bit; + ext4_grpblk_t max_bit = EXT4_CLUSTERS_PER_GROUP(sb); ext4_fsblk_t blk; ext4_fsblk_t group_first_block; @@ -338,7 +339,7 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb, /* check whether block bitmap block number is set */ blk = ext4_block_bitmap(sb, desc); offset = blk - group_first_block; - if (offset < 0 || EXT4_B2C(sbi, offset) >= sb->s_blocksize || + if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit || !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data)) /* bad block bitmap */ return blk; @@ -346,7 +347,7 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb, /* check whether the inode bitmap block number is set */ blk = ext4_inode_bitmap(sb, desc); offset = blk - group_first_block; - if (offset < 0 || EXT4_B2C(sbi, offset) >= sb->s_blocksize || + if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit || !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data)) /* bad block bitmap */ return blk; @@ -354,8 +355,8 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb, /* check whether the inode table block number is set */ blk = ext4_inode_table(sb, desc); offset = blk - group_first_block; - if (offset < 0 || EXT4_B2C(sbi, offset) >= sb->s_blocksize || - EXT4_B2C(sbi, offset + sbi->s_itb_per_group) >= sb->s_blocksize) + if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit || + 
EXT4_B2C(sbi, offset + sbi->s_itb_per_group) >= max_bit) return blk; next_zero_bit = ext4_find_next_zero_bit(bh->b_data, EXT4_B2C(sbi, offset + sbi->s_itb_per_group), diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 0a7315961bac..c969275ce3ee 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -5329,8 +5329,9 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle, stop = le32_to_cpu(extent->ee_block); /* - * In case of left shift, Don't start shifting extents until we make - * sure the hole is big enough to accommodate the shift. + * For left shifts, make sure the hole on the left is big enough to + * accommodate the shift. For right shifts, make sure the last extent + * won't be shifted beyond EXT_MAX_BLOCKS. */ if (SHIFT == SHIFT_LEFT) { path = ext4_find_extent(inode, start - 1, &path, @@ -5350,9 +5351,14 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle, if ((start == ex_start && shift > ex_start) || (shift > start - ex_end)) { - ext4_ext_drop_refs(path); - kfree(path); - return -EINVAL; + ret = -EINVAL; + goto out; + } + } else { + if (shift > EXT_MAX_BLOCKS - + (stop + ext4_ext_get_actual_len(extent))) { + ret = -EINVAL; + goto out; } } diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 185f7e61f4cf..eb104e8476f0 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -5886,5 +5886,6 @@ static void __exit ext4_exit_fs(void) MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others"); MODULE_DESCRIPTION("Fourth Extended Filesystem"); MODULE_LICENSE("GPL"); +MODULE_SOFTDEP("pre: crc32c"); module_init(ext4_init_fs) module_exit(ext4_exit_fs) diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 4b12ba70a895..471d863958bc 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -745,11 +745,12 @@ int inode_congested(struct inode *inode, int cong_bits) */ if (inode && inode_to_wb_is_valid(inode)) { struct bdi_writeback *wb; - bool locked, congested; + struct wb_lock_cookie lock_cookie = {}; + bool congested; - wb = unlocked_inode_to_wb_begin(inode, &locked); + wb = unlocked_inode_to_wb_begin(inode, &lock_cookie); congested = wb_congested(wb, cong_bits); - unlocked_inode_to_wb_end(inode, locked); + unlocked_inode_to_wb_end(inode, &lock_cookie); return congested; } @@ -1960,7 +1961,7 @@ void wb_workfn(struct work_struct *work) } if (!list_empty(&wb->work_list)) - mod_delayed_work(bdi_wq, &wb->dwork, 0); + wb_wakeup(wb); else if (wb_has_dirty_io(wb) && dirty_writeback_interval) wb_wakeup_delayed(wb); diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c index 9bb2fe35799d..10205ececc27 100644 --- a/fs/isofs/compress.c +++ b/fs/isofs/compress.c @@ -20,6 +20,7 @@ #include <linux/init.h> #include <linux/bio.h> +#include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/zlib.h> @@ -59,7 +60,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start, >> bufshift; int haveblocks; blkcnt_t blocknum; - struct buffer_head *bhs[needblocks + 1]; + struct buffer_head **bhs; int curbh, curpage; if (block_size > deflateBound(1UL << zisofs_block_shift)) { @@ -80,7 +81,11 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start, /* Because zlib is not thread-safe, do all the I/O at the top. 
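The isofs hunk here (its allocation follows just below) drops a variable-length array from the kernel stack and allocates the scratch pointer array at run time instead, failing cleanly when memory is tight. The shape of that conversion as a standalone sketch, with calloc standing in for kcalloc and an invented caller contract:

#include <errno.h>
#include <stdlib.h>

struct buffer_head;			/* opaque here, only pointers are stored */

static int alloc_block_array(int needblocks, struct buffer_head ***bhsp)
{
	/* was: struct buffer_head *bhs[needblocks + 1];  (stack VLA) */
	struct buffer_head **bhs;

	bhs = calloc(needblocks + 1, sizeof(*bhs));
	if (!bhs)
		return -ENOMEM;

	/* ... submit I/O that fills bhs[0..needblocks-1] ... */

	*bhsp = bhs;			/* caller releases it with free(bhs) */
	return 0;
}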
*/ blocknum = block_start >> bufshift; - memset(bhs, 0, (needblocks + 1) * sizeof(struct buffer_head *)); + bhs = kcalloc(needblocks + 1, sizeof(*bhs), GFP_KERNEL); + if (!bhs) { + *errp = -ENOMEM; + return 0; + } haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks); ll_rw_block(REQ_OP_READ, 0, haveblocks, bhs); @@ -190,6 +195,7 @@ z_eio: b_eio: for (i = 0; i < haveblocks; i++) brelse(bhs[i]); + kfree(bhs); return stream.total_out; } @@ -305,7 +311,7 @@ static int zisofs_readpage(struct file *file, struct page *page) unsigned int zisofs_pages_per_cblock = PAGE_SHIFT <= zisofs_block_shift ? (1 << (zisofs_block_shift - PAGE_SHIFT)) : 0; - struct page *pages[max_t(unsigned, zisofs_pages_per_cblock, 1)]; + struct page **pages; pgoff_t index = page->index, end_index; end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; @@ -330,6 +336,12 @@ static int zisofs_readpage(struct file *file, struct page *page) full_page = 0; pcount = 1; } + pages = kcalloc(max_t(unsigned int, zisofs_pages_per_cblock, 1), + sizeof(*pages), GFP_KERNEL); + if (!pages) { + unlock_page(page); + return -ENOMEM; + } pages[full_page] = page; for (i = 0; i < pcount; i++, index++) { @@ -357,6 +369,7 @@ static int zisofs_readpage(struct file *file, struct page *page) } /* At this point, err contains 0 or -EIO depending on the "critical" page */ + kfree(pages); return err; } diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index bc258a4402f6..ec3fba7d492f 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -394,7 +394,10 @@ static int parse_options(char *options, struct iso9660_options *popt) break; #ifdef CONFIG_JOLIET case Opt_iocharset: + kfree(popt->iocharset); popt->iocharset = match_strdup(&args[0]); + if (!popt->iocharset) + return 0; break; #endif case Opt_map_a: diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index ac311037d7a5..8aa453784402 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -532,6 +532,7 @@ int jbd2_journal_start_reserved(handle_t *handle, unsigned int type, */ ret = start_this_handle(journal, handle, GFP_NOFS); if (ret < 0) { + handle->h_journal = journal; jbd2_journal_free_reserved(handle); return ret; } diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c index f60dee7faf03..87bdf0f4cba1 100644 --- a/fs/jffs2/super.c +++ b/fs/jffs2/super.c @@ -342,7 +342,7 @@ static void jffs2_put_super (struct super_block *sb) static void jffs2_kill_sb(struct super_block *sb) { struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); - if (!sb_rdonly(sb)) + if (c && !sb_rdonly(sb)) jffs2_stop_garbage_collect_thread(c); kill_mtd_super(sb); kfree(c); diff --git a/fs/namespace.c b/fs/namespace.c index e398f32d7541..5f75969adff1 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1089,7 +1089,8 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root, goto out_free; } - mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED); + mnt->mnt.mnt_flags = old->mnt.mnt_flags; + mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL); /* Don't allow unprivileged users to change mount flags */ if (flag & CL_UNPRIVILEGED) { mnt->mnt.mnt_flags |= MNT_LOCK_ATIME; @@ -2814,7 +2815,7 @@ long do_mount(const char *dev_name, const char __user *dir_name, mnt_flags |= MNT_NODIRATIME; if (flags & MS_STRICTATIME) mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME); - if (flags & SB_RDONLY) + if (flags & MS_RDONLY) mnt_flags |= MNT_READONLY; /* The default atime for remount is preservation */ diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index 
d51e1bb781cf..d94e8031fe5f 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -92,7 +92,7 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark, u32 event_mask, const void *data, int data_type) { - __u32 marks_mask, marks_ignored_mask; + __u32 marks_mask = 0, marks_ignored_mask = 0; const struct path *path = data; pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p" @@ -108,24 +108,20 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark, !d_can_lookup(path->dentry)) return false; - if (inode_mark && vfsmnt_mark) { - marks_mask = (vfsmnt_mark->mask | inode_mark->mask); - marks_ignored_mask = (vfsmnt_mark->ignored_mask | inode_mark->ignored_mask); - } else if (inode_mark) { - /* - * if the event is for a child and this inode doesn't care about - * events on the child, don't send it! - */ - if ((event_mask & FS_EVENT_ON_CHILD) && - !(inode_mark->mask & FS_EVENT_ON_CHILD)) - return false; - marks_mask = inode_mark->mask; - marks_ignored_mask = inode_mark->ignored_mask; - } else if (vfsmnt_mark) { - marks_mask = vfsmnt_mark->mask; - marks_ignored_mask = vfsmnt_mark->ignored_mask; - } else { - BUG(); + /* + * if the event is for a child and this inode doesn't care about + * events on the child, don't send it! + */ + if (inode_mark && + (!(event_mask & FS_EVENT_ON_CHILD) || + (inode_mark->mask & FS_EVENT_ON_CHILD))) { + marks_mask |= inode_mark->mask; + marks_ignored_mask |= inode_mark->ignored_mask; + } + + if (vfsmnt_mark) { + marks_mask |= vfsmnt_mark->mask; + marks_ignored_mask |= vfsmnt_mark->ignored_mask; } if (d_is_dir(path->dentry) && diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index 219b269c737e..613ec7e5a465 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -192,8 +192,9 @@ static int send_to_group(struct inode *to_tell, struct fsnotify_iter_info *iter_info) { struct fsnotify_group *group = NULL; - __u32 inode_test_mask = 0; - __u32 vfsmount_test_mask = 0; + __u32 test_mask = (mask & ~FS_EVENT_ON_CHILD); + __u32 marks_mask = 0; + __u32 marks_ignored_mask = 0; if (unlikely(!inode_mark && !vfsmount_mark)) { BUG(); @@ -213,29 +214,25 @@ static int send_to_group(struct inode *to_tell, /* does the inode mark tell us to do something? */ if (inode_mark) { group = inode_mark->group; - inode_test_mask = (mask & ~FS_EVENT_ON_CHILD); - inode_test_mask &= inode_mark->mask; - inode_test_mask &= ~inode_mark->ignored_mask; + marks_mask |= inode_mark->mask; + marks_ignored_mask |= inode_mark->ignored_mask; } /* does the vfsmount_mark tell us to do something? 
*/ if (vfsmount_mark) { - vfsmount_test_mask = (mask & ~FS_EVENT_ON_CHILD); group = vfsmount_mark->group; - vfsmount_test_mask &= vfsmount_mark->mask; - vfsmount_test_mask &= ~vfsmount_mark->ignored_mask; - if (inode_mark) - vfsmount_test_mask &= ~inode_mark->ignored_mask; + marks_mask |= vfsmount_mark->mask; + marks_ignored_mask |= vfsmount_mark->ignored_mask; } pr_debug("%s: group=%p to_tell=%p mask=%x inode_mark=%p" - " inode_test_mask=%x vfsmount_mark=%p vfsmount_test_mask=%x" + " vfsmount_mark=%p marks_mask=%x marks_ignored_mask=%x" " data=%p data_is=%d cookie=%d\n", - __func__, group, to_tell, mask, inode_mark, - inode_test_mask, vfsmount_mark, vfsmount_test_mask, data, + __func__, group, to_tell, mask, inode_mark, vfsmount_mark, + marks_mask, marks_ignored_mask, data, data_is, cookie); - if (!inode_test_mask && !vfsmount_test_mask) + if (!(test_mask & marks_mask & ~marks_ignored_mask)) return 0; return group->ops->handle_event(group, to_tell, inode_mark, diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 01c6b3894406..7869622af22a 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -4250,10 +4250,11 @@ out: static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, bool preserve) { - int error; + int error, had_lock; struct inode *inode = d_inode(old_dentry); struct buffer_head *old_bh = NULL; struct inode *new_orphan_inode = NULL; + struct ocfs2_lock_holder oh; if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) return -EOPNOTSUPP; @@ -4295,6 +4296,14 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir, goto out; } + had_lock = ocfs2_inode_lock_tracker(new_orphan_inode, NULL, 1, + &oh); + if (had_lock < 0) { + error = had_lock; + mlog_errno(error); + goto out; + } + /* If the security isn't preserved, we need to re-initialize them. 
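Both notification refactors above fold whatever marks exist into two accumulators, an interest mask and an ignore mask, and then make one decision per event. Reduced to plain bit arithmetic (the flag value and struct layout are invented for the example):

#include <stdbool.h>
#include <stdint.h>

#define EV_ON_CHILD	0x08000000u		/* illustrative flag value */

struct mark { uint32_t mask, ignored_mask; };

static bool should_send(uint32_t event, const struct mark *inode_mark,
			const struct mark *mount_mark)
{
	uint32_t test_mask = event & ~EV_ON_CHILD;
	uint32_t marks_mask = 0, ignored = 0;

	/* an inode mark only counts if it cares about events on children */
	if (inode_mark &&
	    (!(event & EV_ON_CHILD) || (inode_mark->mask & EV_ON_CHILD))) {
		marks_mask |= inode_mark->mask;
		ignored |= inode_mark->ignored_mask;
	}
	if (mount_mark) {
		marks_mask |= mount_mark->mask;
		ignored |= mount_mark->ignored_mask;
	}
	return (test_mask & marks_mask & ~ignored) != 0;
}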
*/ if (!preserve) { error = ocfs2_init_security_and_acl(dir, new_orphan_inode, @@ -4302,14 +4311,15 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir, if (error) mlog_errno(error); } -out: if (!error) { error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode, new_dentry); if (error) mlog_errno(error); } + ocfs2_inode_unlock_tracker(new_orphan_inode, 1, &oh, had_lock); +out: if (new_orphan_inode) { /* * We need to open_unlock the inode no matter whether we diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c index 3ae5fdba0225..10796d3fe27d 100644 --- a/fs/orangefs/super.c +++ b/fs/orangefs/super.c @@ -579,6 +579,11 @@ void orangefs_kill_sb(struct super_block *sb) /* provided sb cleanup */ kill_anon_super(sb); + if (!ORANGEFS_SB(sb)) { + mutex_lock(&orangefs_request_mutex); + mutex_unlock(&orangefs_request_mutex); + return; + } /* * issue the unmount to userspace to tell it to remove the * dynamic mount info it has for this superblock diff --git a/fs/proc/base.c b/fs/proc/base.c index eafa39a3a88c..1b2ede6abcdf 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1693,6 +1693,12 @@ void task_dump_owner(struct task_struct *task, umode_t mode, kuid_t uid; kgid_t gid; + if (unlikely(task->flags & PF_KTHREAD)) { + *ruid = GLOBAL_ROOT_UID; + *rgid = GLOBAL_ROOT_GID; + return; + } + /* Default to the tasks effective ownership */ rcu_read_lock(); cred = __task_cred(task); diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index d1e82761de81..e64ecb9f2720 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -209,25 +209,34 @@ kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg) { struct list_head *head = (struct list_head *)arg; struct kcore_list *ent; + struct page *p; + + if (!pfn_valid(pfn)) + return 1; + + p = pfn_to_page(pfn); + if (!memmap_valid_within(pfn, p, page_zone(p))) + return 1; ent = kmalloc(sizeof(*ent), GFP_KERNEL); if (!ent) return -ENOMEM; - ent->addr = (unsigned long)__va((pfn << PAGE_SHIFT)); + ent->addr = (unsigned long)page_to_virt(p); ent->size = nr_pages << PAGE_SHIFT; - /* Sanity check: Can happen in 32bit arch...maybe */ - if (ent->addr < (unsigned long) __va(0)) + if (!virt_addr_valid(ent->addr)) goto free_out; /* cut not-mapped area. ....from ppc-32 code. 
*/ if (ULONG_MAX - ent->addr < ent->size) ent->size = ULONG_MAX - ent->addr; - /* cut when vmalloc() area is higher than direct-map area */ - if (VMALLOC_START > (unsigned long)__va(0)) { - if (ent->addr > VMALLOC_START) - goto free_out; + /* + * We've already checked virt_addr_valid so we know this address + * is a valid pointer, therefore we can check against it to determine + * if we need to trim + */ + if (VMALLOC_START > ent->addr) { if (VMALLOC_START - ent->addr < ent->size) ent->size = VMALLOC_START - ent->addr; } diff --git a/fs/proc/loadavg.c b/fs/proc/loadavg.c index a000d7547479..b572cc865b92 100644 --- a/fs/proc/loadavg.c +++ b/fs/proc/loadavg.c @@ -24,7 +24,7 @@ static int loadavg_proc_show(struct seq_file *m, void *v) LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]), LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]), nr_running(), nr_threads, - idr_get_cursor(&task_active_pid_ns(current)->idr)); + idr_get_cursor(&task_active_pid_ns(current)->idr) - 1); return 0; } diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 65ae54659833..c486ad4b43f0 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -1310,9 +1310,11 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION else if (is_swap_pmd(pmd)) { swp_entry_t entry = pmd_to_swp_entry(pmd); + unsigned long offset = swp_offset(entry); + offset += (addr & ~PMD_MASK) >> PAGE_SHIFT; frame = swp_type(entry) | - (swp_offset(entry) << MAX_SWAPFILES_SHIFT); + (offset << MAX_SWAPFILES_SHIFT); flags |= PM_SWAP; if (pmd_swp_soft_dirty(pmd)) flags |= PM_SOFT_DIRTY; @@ -1332,6 +1334,8 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, break; if (pm->show_pfn && (flags & PM_PRESENT)) frame++; + else if (flags & PM_SWAP) + frame += (1 << MAX_SWAPFILES_SHIFT); } spin_unlock(ptl); return err; diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 020c597ef9b6..d88231e3b2be 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -2966,7 +2966,7 @@ static int __init dquot_init(void) NULL); order = 0; - dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order); + dquot_hash = (struct hlist_head *)__get_free_pages(GFP_KERNEL, order); if (!dquot_hash) panic("Cannot create dquot hash table"); diff --git a/fs/super.c b/fs/super.c index 5fa9a8d8d865..122c402049a2 100644 --- a/fs/super.c +++ b/fs/super.c @@ -167,6 +167,7 @@ static void destroy_unused_super(struct super_block *s) security_sb_free(s); put_user_ns(s->s_user_ns); kfree(s->s_subtype); + free_prealloced_shrinker(&s->s_shrink); /* no delays needed */ destroy_super_work(&s->destroy_work); } @@ -252,6 +253,8 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags, s->s_shrink.count_objects = super_cache_count; s->s_shrink.batch = 1024; s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE; + if (prealloc_shrinker(&s->s_shrink)) + goto fail; return s; fail: @@ -518,11 +521,7 @@ retry: hlist_add_head(&s->s_instances, &type->fs_supers); spin_unlock(&sb_lock); get_filesystem(type); - err = register_shrinker(&s->s_shrink); - if (err) { - deactivate_locked_super(s); - s = ERR_PTR(err); - } + register_shrinker_prepared(&s->s_shrink); return s; } diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c index f897e55f2cd0..16a8ad21b77e 100644 --- a/fs/udf/unicode.c +++ b/fs/udf/unicode.c @@ -28,6 +28,9 @@ #include "udf_sb.h" +#define SURROGATE_MASK 0xfffff800 +#define SURROGATE_PAIR 0x0000d800 + static int udf_uni2char_utf8(wchar_t uni, unsigned char *out, int boundlen) 
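The pagemap change above makes a swapped, PMD-sized mapping report a distinct swap offset for every 4 KiB page inside it, and then steps the encoded frame by one offset unit per page. The arithmetic in isolation, with typical x86-64 shift values used purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT		12
#define PMD_SHIFT		21
#define PMD_MASK		(~((1ULL << PMD_SHIFT) - 1))
#define MAX_SWAPFILES_SHIFT	5

/* Encode type + per-page swap offset for the page at 'addr' inside a
 * swapped PMD whose first page sits at swap offset 'pmd_offset'. */
static uint64_t swap_frame(uint64_t addr, unsigned int type, uint64_t pmd_offset)
{
	uint64_t offset = pmd_offset + ((addr & ~PMD_MASK) >> PAGE_SHIFT);

	return (uint64_t)type | (offset << MAX_SWAPFILES_SHIFT);
}

int main(void)
{
	uint64_t base = 0x7f0000200000ULL;	/* PMD-aligned address */

	/* consecutive pages now differ by exactly 1 << MAX_SWAPFILES_SHIFT */
	printf("%#llx\n", (unsigned long long)swap_frame(base, 1, 100));
	printf("%#llx\n", (unsigned long long)swap_frame(base + 4096, 1, 100));
	return 0;
}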
@@ -37,6 +40,9 @@ static int udf_uni2char_utf8(wchar_t uni, if (boundlen <= 0) return -ENAMETOOLONG; + if ((uni & SURROGATE_MASK) == SURROGATE_PAIR) + return -EINVAL; + if (uni < 0x80) { out[u_len++] = (unsigned char)uni; } else if (uni < 0x800) { diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c index ce4a34a2751d..35a124400d60 100644 --- a/fs/xfs/libxfs/xfs_attr.c +++ b/fs/xfs/libxfs/xfs_attr.c @@ -511,7 +511,14 @@ xfs_attr_shortform_addname(xfs_da_args_t *args) if (args->flags & ATTR_CREATE) return retval; retval = xfs_attr_shortform_remove(args); - ASSERT(retval == 0); + if (retval) + return retval; + /* + * Since we have removed the old attr, clear ATTR_REPLACE so + * that the leaf format add routine won't trip over the attr + * not being around. + */ + args->flags &= ~ATTR_REPLACE; } if (args->namelen >= XFS_ATTR_SF_ENTSIZE_MAX || diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index 6a7c2f03ea11..040eeda8426f 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -725,12 +725,16 @@ xfs_bmap_extents_to_btree( *logflagsp = 0; if ((error = xfs_alloc_vextent(&args))) { xfs_iroot_realloc(ip, -1, whichfork); + ASSERT(ifp->if_broot == NULL); + XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); return error; } if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) { xfs_iroot_realloc(ip, -1, whichfork); + ASSERT(ifp->if_broot == NULL); + XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); return -ENOSPC; } diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c index ef68b1de006a..1201107eabc6 100644 --- a/fs/xfs/libxfs/xfs_inode_buf.c +++ b/fs/xfs/libxfs/xfs_inode_buf.c @@ -466,6 +466,8 @@ xfs_dinode_verify( return __this_address; if (di_size > XFS_DFORK_DSIZE(dip, mp)) return __this_address; + if (dip->di_nextents) + return __this_address; /* fall through */ case XFS_DINODE_FMT_EXTENTS: case XFS_DINODE_FMT_BTREE: @@ -484,12 +486,31 @@ xfs_dinode_verify( if (XFS_DFORK_Q(dip)) { switch (dip->di_aformat) { case XFS_DINODE_FMT_LOCAL: + if (dip->di_anextents) + return __this_address; + /* fall through */ case XFS_DINODE_FMT_EXTENTS: case XFS_DINODE_FMT_BTREE: break; default: return __this_address; } + } else { + /* + * If there is no fork offset, this may be a freshly-made inode + * in a new disk cluster, in which case di_aformat is zeroed. + * Otherwise, such an inode must be in EXTENTS format; this goes + * for freed inodes as well. + */ + switch (dip->di_aformat) { + case 0: + case XFS_DINODE_FMT_EXTENTS: + break; + default: + return __this_address; + } + if (dip->di_anextents) + return __this_address; } /* only version 3 or greater inodes are extensively verified here */ diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 299aee4b7b0b..e70fb8ccecea 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -778,22 +778,26 @@ xfs_file_fallocate( if (error) goto out_unlock; } else if (mode & FALLOC_FL_INSERT_RANGE) { - unsigned int blksize_mask = i_blocksize(inode) - 1; + unsigned int blksize_mask = i_blocksize(inode) - 1; + loff_t isize = i_size_read(inode); - new_size = i_size_read(inode) + len; if (offset & blksize_mask || len & blksize_mask) { error = -EINVAL; goto out_unlock; } - /* check the new inode size does not wrap through zero */ - if (new_size > inode->i_sb->s_maxbytes) { + /* + * New inode size must not exceed ->s_maxbytes, accounting for + * possible signed overflow. 
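The comment just above introduces the check that follows in this hunk: rather than computing isize + len and testing the sum, which can wrap, the code compares len against the remaining headroom. The same guard in isolation, as a sketch assuming 0 <= isize <= maxbytes and len >= 0:

#include <errno.h>
#include <stdint.h>

/* Return 0 if growing a file of size 'isize' by 'len' stays within
 * 'maxbytes', -EFBIG otherwise; under the stated assumptions
 * maxbytes - isize cannot overflow, unlike isize + len. */
static int check_new_size(int64_t isize, int64_t len, int64_t maxbytes)
{
	if (maxbytes - isize < len)
		return -EFBIG;
	return 0;
}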
+ */ + if (inode->i_sb->s_maxbytes - isize < len) { error = -EFBIG; goto out_unlock; } + new_size = isize + len; /* Offset should be less than i_size */ - if (offset >= i_size_read(inode)) { + if (offset >= isize) { error = -EINVAL; goto out_unlock; } @@ -876,8 +880,18 @@ xfs_file_dedupe_range( struct file *dst_file, u64 dst_loff) { + struct inode *srci = file_inode(src_file); + u64 max_dedupe; int error; + /* + * Since we have to read all these pages in to compare them, cut + * it off at MAX_RW_COUNT/2 rounded down to the nearest block. + * That means we won't do more than MAX_RW_COUNT IO per request. + */ + max_dedupe = (MAX_RW_COUNT >> 1) & ~(i_blocksize(srci) - 1); + if (len > max_dedupe) + len = max_dedupe; error = xfs_reflink_remap_range(src_file, loff, dst_file, dst_loff, len, true); if (error) diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 278841c75b97..af240573e482 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -188,7 +188,7 @@ #endif #ifdef CONFIG_SERIAL_EARLYCON -#define EARLYCON_TABLE() STRUCT_ALIGN(); \ +#define EARLYCON_TABLE() . = ALIGN(8); \ VMLINUX_SYMBOL(__earlycon_table) = .; \ KEEP(*(__earlycon_table)) \ VMLINUX_SYMBOL(__earlycon_table_end) = .; diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h index 562fa7df2637..98e63d870139 100644 --- a/include/drm/drm_hdcp.h +++ b/include/drm/drm_hdcp.h @@ -19,7 +19,7 @@ #define DRM_HDCP_RI_LEN 2 #define DRM_HDCP_V_PRIME_PART_LEN 4 #define DRM_HDCP_V_PRIME_NUM_PARTS 5 -#define DRM_HDCP_NUM_DOWNSTREAM(x) (x & 0x3f) +#define DRM_HDCP_NUM_DOWNSTREAM(x) (x & 0x7f) #define DRM_HDCP_MAX_CASCADE_EXCEEDED(x) (x & BIT(3)) #define DRM_HDCP_MAX_DEVICE_EXCEEDED(x) (x & BIT(7)) diff --git a/include/dt-bindings/clock/stm32mp1-clks.h b/include/dt-bindings/clock/stm32mp1-clks.h index 86e3ec662ef4..90ec780bfc68 100644 --- a/include/dt-bindings/clock/stm32mp1-clks.h +++ b/include/dt-bindings/clock/stm32mp1-clks.h @@ -76,7 +76,7 @@ #define I2C6 63 #define USART1 64 #define RTCAPB 65 -#define TZC 66 +#define TZC1 66 #define TZPC 67 #define IWDG1 68 #define BSEC 69 @@ -123,6 +123,7 @@ #define CRC1 110 #define USBH 111 #define ETHSTP 112 +#define TZC2 113 /* Kernel clocks */ #define SDMMC1_K 118 @@ -228,7 +229,6 @@ #define CK_MCO2 212 /* TRACE & DEBUG clocks */ -#define DBG 213 #define CK_DBG 214 #define CK_TRACE 215 diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h index e518e4e3dfb5..4b1548129fa2 100644 --- a/include/kvm/arm_psci.h +++ b/include/kvm/arm_psci.h @@ -37,10 +37,15 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm) * Our PSCI implementation stays the same across versions from * v0.2 onward, only adding the few mandatory functions (such * as FEATURES with 1.0) that are required by newer - * revisions. It is thus safe to return the latest. + * revisions. It is thus safe to return the latest, unless + * userspace has instructed us otherwise. 
*/ - if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) + if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) { + if (vcpu->kvm->arch.psci_version) + return vcpu->kvm->arch.psci_version; + return KVM_ARM_PSCI_LATEST; + } return KVM_ARM_PSCI_0_1; } @@ -48,4 +53,11 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm) int kvm_hvc_call_handler(struct kvm_vcpu *vcpu); +struct kvm_one_reg; + +int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu); +int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices); +int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); +int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); + #endif /* __KVM_ARM_PSCI_H__ */ diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 24f03941ada8..e7efe12a81bd 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -131,6 +131,7 @@ struct vgic_irq { u32 mpidr; /* GICv3 target VCPU */ }; u8 source; /* GICv2 SGIs only */ + u8 active_source; /* GICv2 SGIs only */ u8 priority; enum vgic_irq_config config; /* Level or edge */ diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index bfe86b54f6c1..0bd432a4d7bd 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -223,6 +223,11 @@ static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync) set_wb_congested(bdi->wb.congested, sync); } +struct wb_lock_cookie { + bool locked; + unsigned long flags; +}; + #ifdef CONFIG_CGROUP_WRITEBACK /** diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index f6be4b0b6c18..72ca0f3d39f3 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -347,7 +347,7 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode) /** * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction * @inode: target inode - * @lockedp: temp bool output param, to be passed to the end function + * @cookie: output param, to be passed to the end function * * The caller wants to access the wb associated with @inode but isn't * holding inode->i_lock, the i_pages lock or wb->list_lock. This @@ -355,12 +355,12 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode) * association doesn't change until the transaction is finished with * unlocked_inode_to_wb_end(). * - * The caller must call unlocked_inode_to_wb_end() with *@lockdep - * afterwards and can't sleep during transaction. IRQ may or may not be - * disabled on return. + * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and + * can't sleep during the transaction. IRQs may or may not be disabled on + * return. */ static inline struct bdi_writeback * -unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) +unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) { rcu_read_lock(); @@ -368,10 +368,10 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) * Paired with store_release in inode_switch_wb_work_fn() and * ensures that we see the new wb if we see cleared I_WB_SWITCH. 
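The struct wb_lock_cookie added above lets the begin helper record both whether it took the lock and the IRQ flags it saved, so the matching end helper (reworked just below) can restore exactly that state. A userspace sketch of the cookie-passing shape, with a pthread mutex standing in for the irqsave/irqrestore locking:

#include <pthread.h>
#include <stdbool.h>

/* Filled by the begin helper, consumed unmodified by the end helper; in the
 * kernel the 'flags' slot carries the state saved by xa_lock_irqsave(). */
struct lock_cookie {
	bool locked;
	unsigned long flags;
};

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

/* 'needs_lock' models the I_WB_SWITCH test done under RCU in the kernel. */
static void wb_access_begin(bool needs_lock, struct lock_cookie *cookie)
{
	cookie->locked = needs_lock;
	if (cookie->locked)
		pthread_mutex_lock(&tree_lock);	  /* kernel: xa_lock_irqsave(..., cookie->flags) */
}

static void wb_access_end(const struct lock_cookie *cookie)
{
	if (cookie->locked)
		pthread_mutex_unlock(&tree_lock); /* kernel: xa_unlock_irqrestore(..., cookie->flags) */
}

In the kernel, callers declare struct wb_lock_cookie lock_cookie = {}; and pass the same cookie to both helpers, as the inode_congested() hunk earlier in this diff does.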
*/ - *lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; + cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; - if (unlikely(*lockedp)) - xa_lock_irq(&inode->i_mapping->i_pages); + if (unlikely(cookie->locked)) + xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags); /* * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages @@ -383,12 +383,13 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) /** * unlocked_inode_to_wb_end - end inode wb access transaction * @inode: target inode - * @locked: *@lockedp from unlocked_inode_to_wb_begin() + * @cookie: @cookie from unlocked_inode_to_wb_begin() */ -static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) +static inline void unlocked_inode_to_wb_end(struct inode *inode, + struct wb_lock_cookie *cookie) { - if (unlikely(locked)) - xa_unlock_irq(&inode->i_mapping->i_pages); + if (unlikely(cookie->locked)) + xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags); rcu_read_unlock(); } @@ -435,12 +436,13 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode) } static inline struct bdi_writeback * -unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) +unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) { return inode_to_wb(inode); } -static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) +static inline void unlocked_inode_to_wb_end(struct inode *inode, + struct wb_lock_cookie *cookie) { } diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index e3986f4b3461..ebc34a5686dc 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -9,6 +9,9 @@ struct blk_mq_tags; struct blk_flush_queue; +/** + * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware block device + */ struct blk_mq_hw_ctx { struct { spinlock_t lock; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 21e21f273a21..5c4eee043191 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -742,6 +742,7 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q); #define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags) #define blk_queue_preempt_only(q) \ test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags) +#define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags) extern int blk_set_preempt_only(struct request_queue *q); extern void blk_clear_preempt_only(struct request_queue *q); diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 95a7abd0ee92..469b20e1dd7e 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -31,6 +31,7 @@ struct bpf_map_ops { void (*map_release)(struct bpf_map *map, struct file *map_file); void (*map_free)(struct bpf_map *map); int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key); + void (*map_release_uref)(struct bpf_map *map); /* funcs callable from userspace and from eBPF programs */ void *(*map_lookup_elem)(struct bpf_map *map, void *key); @@ -339,8 +340,8 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs, void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs, struct bpf_prog *old_prog); int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array, - __u32 __user *prog_ids, u32 request_cnt, - __u32 __user *prog_cnt); + u32 *prog_ids, u32 request_cnt, + u32 *prog_cnt); int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, struct bpf_prog *exclude_prog, struct bpf_prog *include_prog, @@ -351,6 +352,7 @@ int 
bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, struct bpf_prog **_prog, *__prog; \ struct bpf_prog_array *_array; \ u32 _ret = 1; \ + preempt_disable(); \ rcu_read_lock(); \ _array = rcu_dereference(array); \ if (unlikely(check_non_null && !_array))\ @@ -362,6 +364,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, } \ _out: \ rcu_read_unlock(); \ + preempt_enable_no_resched(); \ _ret; \ }) @@ -434,7 +437,6 @@ int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value); int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, void *key, void *value, u64 map_flags); int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); -void bpf_fd_array_map_clear(struct bpf_map *map); int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, void *key, void *value, u64 map_flags); int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index d3339dd48b1a..b324e01ccf2d 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -25,6 +25,7 @@ #define PHY_ID_BCM54612E 0x03625e60 #define PHY_ID_BCM54616S 0x03625d10 #define PHY_ID_BCM57780 0x03625d90 +#define PHY_ID_BCM89610 0x03625cd0 #define PHY_ID_BCM7250 0xae025280 #define PHY_ID_BCM7260 0xae025190 diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 528ccc943cee..96bb32285989 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -77,7 +77,10 @@ struct ceph_osd_data { u32 bio_length; }; #endif /* CONFIG_BLOCK */ - struct ceph_bvec_iter bvec_pos; + struct { + struct ceph_bvec_iter bvec_pos; + u32 num_bvecs; + }; }; }; @@ -412,6 +415,10 @@ void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req, struct ceph_bio_iter *bio_pos, u32 bio_length); #endif /* CONFIG_BLOCK */ +void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req, + unsigned int which, + struct bio_vec *bvecs, u32 num_bvecs, + u32 bytes); void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req, unsigned int which, struct ceph_bvec_iter *bvec_pos); @@ -426,7 +433,8 @@ extern void osd_req_op_cls_request_data_pages(struct ceph_osd_request *, bool own_pages); void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req, unsigned int which, - struct bio_vec *bvecs, u32 bytes); + struct bio_vec *bvecs, u32 num_bvecs, + u32 bytes); extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *, unsigned int which, struct page **pages, u64 length, diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 210a890008f9..1d25e149c1c5 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -765,6 +765,9 @@ int __clk_mux_determine_rate(struct clk_hw *hw, int __clk_determine_rate(struct clk_hw *core, struct clk_rate_request *req); int __clk_mux_determine_rate_closest(struct clk_hw *hw, struct clk_rate_request *req); +int clk_mux_determine_rate_flags(struct clk_hw *hw, + struct clk_rate_request *req, + unsigned long flags); void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent); void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate, unsigned long max_rate); diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index ceb96ecab96e..7d98e263e048 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -25,6 +25,9 @@ #define __SANITIZE_ADDRESS__ #endif +#undef 
__no_sanitize_address +#define __no_sanitize_address __attribute__((no_sanitize("address"))) + /* Clang doesn't have a way to turn it off per-function, yet. */ #ifdef __noretpoline #undef __noretpoline diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h index edfeaba95429..a1a959ba24ff 100644 --- a/include/linux/coresight-pmu.h +++ b/include/linux/coresight-pmu.h @@ -1,18 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright(C) 2015 Linaro Limited. All rights reserved. * Author: Mathieu Poirier <mathieu.poirier@linaro.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. */ #ifndef _LINUX_CORESIGHT_PMU_H diff --git a/include/linux/device.h b/include/linux/device.h index 0059b99e1f25..477956990f5e 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -256,7 +256,9 @@ enum probe_type { * automatically. * @pm: Power management operations of the device which matched * this driver. - * @coredump: Called through sysfs to initiate a device coredump. + * @coredump: Called when sysfs entry is written to. The device driver + * is expected to call the dev_coredump API resulting in a + * uevent. * @p: Driver core's private data, no one other than the driver * core can touch this. * @@ -288,7 +290,7 @@ struct device_driver { const struct attribute_group **groups; const struct dev_pm_ops *pm; - int (*coredump) (struct device *dev); + void (*coredump) (struct device *dev); struct driver_private *p; }; diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index ebe41811ed34..b32cd2062f18 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -310,6 +310,8 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, * fields should be ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS * instead of the latter), any change to them will be overwritten * by kernel. Returns a negative error code or zero. + * @get_fecparam: Get the network device Forward Error Correction parameters. + * @set_fecparam: Set the network device Forward Error Correction parameters. * * All operations are optional (i.e. the function pointer may be set * to %NULL) and callers must take this into account. Callers must diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 9f1edb92c97e..e64c0294f50b 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -217,12 +217,10 @@ struct fsnotify_mark_connector { union { /* Object pointer [lock] */ struct inode *inode; struct vfsmount *mnt; - }; - union { - struct hlist_head list; /* Used listing heads to free after srcu period expires */ struct fsnotify_mark_connector *destroy_next; }; + struct hlist_head list; }; /* @@ -248,7 +246,7 @@ struct fsnotify_mark { /* Group this mark is for. Set on mark creation, stable until last ref * is dropped */ struct fsnotify_group *group; - /* List of marks by group->i_fsnotify_marks. Also reused for queueing + /* List of marks by group->marks_list. 
Also reused for queueing * mark into destroy_list when it's waiting for the end of SRCU period * before it can be freed. [group->mark_mutex] */ struct list_head g_list; diff --git a/include/linux/genhd.h b/include/linux/genhd.h index c826b0b5232a..6cb8a5789668 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -368,7 +368,9 @@ static inline void free_part_stats(struct hd_struct *part) part_stat_add(cpu, gendiskp, field, -subnd) void part_in_flight(struct request_queue *q, struct hd_struct *part, - unsigned int inflight[2]); + unsigned int inflight[2]); +void part_in_flight_rw(struct request_queue *q, struct hd_struct *part, + unsigned int inflight[2]); void part_dec_in_flight(struct request_queue *q, struct hd_struct *part, int rw); void part_inc_in_flight(struct request_queue *q, struct hd_struct *part, diff --git a/include/linux/hid.h b/include/linux/hid.h index 8da3e1f48195..26240a22978a 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -516,6 +516,12 @@ enum hid_type { HID_TYPE_USBNONE }; +enum hid_battery_status { + HID_BATTERY_UNKNOWN = 0, + HID_BATTERY_QUERIED, /* Kernel explicitly queried battery strength */ + HID_BATTERY_REPORTED, /* Device sent unsolicited battery strength report */ +}; + struct hid_driver; struct hid_ll_driver; @@ -558,7 +564,8 @@ struct hid_device { /* device report descriptor */ __s32 battery_max; __s32 battery_report_type; __s32 battery_report_id; - bool battery_reported; + enum hid_battery_status battery_status; + bool battery_avoid_query; #endif unsigned int status; /* see STAT flags above */ diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index a2656c3ebe81..3892e9c8b2de 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -161,9 +161,11 @@ struct hrtimer_clock_base { enum hrtimer_base_type { HRTIMER_BASE_MONOTONIC, HRTIMER_BASE_REALTIME, + HRTIMER_BASE_BOOTTIME, HRTIMER_BASE_TAI, HRTIMER_BASE_MONOTONIC_SOFT, HRTIMER_BASE_REALTIME_SOFT, + HRTIMER_BASE_BOOTTIME_SOFT, HRTIMER_BASE_TAI_SOFT, HRTIMER_MAX_CLOCK_BASES, }; diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index d11f41d5269f..78a5a90b4267 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -663,7 +663,7 @@ static inline bool skb_vlan_tagged(const struct sk_buff *skb) * Returns true if the skb is tagged with multiple vlan headers, regardless * of whether it is hardware accelerated or not. */ -static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb) +static inline bool skb_vlan_tagged_multi(struct sk_buff *skb) { __be16 protocol = skb->protocol; @@ -673,6 +673,9 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb) if (likely(!eth_type_vlan(protocol))) return false; + if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN))) + return false; + veh = (struct vlan_ethhdr *)skb->data; protocol = veh->h_vlan_encapsulated_proto; } @@ -690,7 +693,7 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb) * * Returns features without unsafe ones if the skb has multiple tags. 
*/ -static inline netdev_features_t vlan_features_check(const struct sk_buff *skb, +static inline netdev_features_t vlan_features_check(struct sk_buff *skb, netdev_features_t features) { if (skb_vlan_tagged_multi(skb)) { diff --git a/include/linux/kthread.h b/include/linux/kthread.h index c1961761311d..2803264c512f 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -62,6 +62,7 @@ void *kthread_probe_data(struct task_struct *k); int kthread_park(struct task_struct *k); void kthread_unpark(struct task_struct *k); void kthread_parkme(void); +void kthread_park_complete(struct task_struct *k); int kthreadd(void *unused); extern struct task_struct *kthreadd_task; diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 4754f01c1abb..aec44b1d9582 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -186,13 +186,20 @@ static inline bool klp_have_reliable_stack(void) IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE); } +typedef int (*klp_shadow_ctor_t)(void *obj, + void *shadow_data, + void *ctor_data); +typedef void (*klp_shadow_dtor_t)(void *obj, void *shadow_data); + void *klp_shadow_get(void *obj, unsigned long id); -void *klp_shadow_alloc(void *obj, unsigned long id, void *data, - size_t size, gfp_t gfp_flags); -void *klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data, - size_t size, gfp_t gfp_flags); -void klp_shadow_free(void *obj, unsigned long id); -void klp_shadow_free_all(unsigned long id); +void *klp_shadow_alloc(void *obj, unsigned long id, + size_t size, gfp_t gfp_flags, + klp_shadow_ctor_t ctor, void *ctor_data); +void *klp_shadow_get_or_alloc(void *obj, unsigned long id, + size_t size, gfp_t gfp_flags, + klp_shadow_ctor_t ctor, void *ctor_data); +void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor); +void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor); #else /* !CONFIG_LIVEPATCH */ diff --git a/include/linux/microchipphy.h b/include/linux/microchipphy.h index eb492d47f717..8f9c90379732 100644 --- a/include/linux/microchipphy.h +++ b/include/linux/microchipphy.h @@ -70,4 +70,12 @@ #define LAN88XX_MMD3_CHIP_ID (32877) #define LAN88XX_MMD3_CHIP_REV (32878) +/* DSP registers */ +#define PHY_ARDENNES_MMD_DEV_3_PHY_CFG (0x806A) +#define PHY_ARDENNES_MMD_DEV_3_PHY_CFG_ZD_DLY_EN_ (0x2000) +#define LAN88XX_EXT_PAGE_ACCESS_TR (0x52B5) +#define LAN88XX_EXT_PAGE_TR_CR 16 +#define LAN88XX_EXT_PAGE_TR_LOW_DATA 17 +#define LAN88XX_EXT_PAGE_TR_HIGH_DATA 18 + #endif /* _MICROCHIPPHY_H */ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 767d193c269a..2a156c5dfadd 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -1284,25 +1284,19 @@ enum { }; static inline const struct cpumask * -mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector) +mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector) { - const struct cpumask *mask; struct irq_desc *desc; unsigned int irq; int eqn; int err; - err = mlx5_vector2eqn(dev, MLX5_EQ_VEC_COMP_BASE + vector, &eqn, &irq); + err = mlx5_vector2eqn(dev, vector, &eqn, &irq); if (err) return NULL; desc = irq_to_desc(irq); -#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK - mask = irq_data_get_effective_affinity_mask(&desc->irq_data); -#else - mask = desc->irq_common_data.affinity; -#endif - return mask; + return desc->affinity_hint; } #endif /* MLX5_DRIVER_H */ diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h index b63fa457febd..3529683f691e 100644 --- 
a/include/linux/mtd/flashchip.h +++ b/include/linux/mtd/flashchip.h @@ -85,6 +85,7 @@ struct flchip { unsigned int write_suspended:1; unsigned int erase_suspended:1; unsigned long in_progress_block_addr; + unsigned long in_progress_block_mask; struct mutex mutex; wait_queue_head_t wq; /* Wait on here when we're waiting for the chip diff --git a/include/linux/oom.h b/include/linux/oom.h index 5bad038ac012..6adac113e96d 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -95,6 +95,8 @@ static inline int check_stable_address_space(struct mm_struct *mm) return 0; } +void __oom_reap_task_mm(struct mm_struct *mm); + extern unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, const nodemask_t *nodemask, unsigned long totalpages); diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h index 6bfd2b581f75..af8a61be2d8d 100644 --- a/include/linux/rbtree_augmented.h +++ b/include/linux/rbtree_augmented.h @@ -26,6 +26,7 @@ #include <linux/compiler.h> #include <linux/rbtree.h> +#include <linux/rcupdate.h> /* * Please note - only struct rb_augment_callbacks and the prototypes for diff --git a/include/linux/rbtree_latch.h b/include/linux/rbtree_latch.h index ece43e882b56..7d012faa509a 100644 --- a/include/linux/rbtree_latch.h +++ b/include/linux/rbtree_latch.h @@ -35,6 +35,7 @@ #include <linux/rbtree.h> #include <linux/seqlock.h> +#include <linux/rcupdate.h> struct latch_tree_node { struct rb_node node[2]; diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index d09a9c7af109..dfdaede9139e 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -569,7 +569,7 @@ static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev) void rproc_add_subdev(struct rproc *rproc, struct rproc_subdev *subdev, int (*probe)(struct rproc_subdev *subdev), - void (*remove)(struct rproc_subdev *subdev, bool graceful)); + void (*remove)(struct rproc_subdev *subdev, bool crashed)); void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev); diff --git a/include/linux/sched.h b/include/linux/sched.h index b3d697f3b573..c2413703f45d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -112,17 +112,36 @@ struct task_group; #ifdef CONFIG_DEBUG_ATOMIC_SLEEP +/* + * Special states are those that do not use the normal wait-loop pattern. See + * the comment with set_special_state(). 
+ */ +#define is_special_task_state(state) \ + ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_DEAD)) + #define __set_current_state(state_value) \ do { \ + WARN_ON_ONCE(is_special_task_state(state_value));\ current->task_state_change = _THIS_IP_; \ current->state = (state_value); \ } while (0) + #define set_current_state(state_value) \ do { \ + WARN_ON_ONCE(is_special_task_state(state_value));\ current->task_state_change = _THIS_IP_; \ smp_store_mb(current->state, (state_value)); \ } while (0) +#define set_special_state(state_value) \ + do { \ + unsigned long flags; /* may shadow */ \ + WARN_ON_ONCE(!is_special_task_state(state_value)); \ + raw_spin_lock_irqsave(&current->pi_lock, flags); \ + current->task_state_change = _THIS_IP_; \ + current->state = (state_value); \ + raw_spin_unlock_irqrestore(&current->pi_lock, flags); \ + } while (0) #else /* * set_current_state() includes a barrier so that the write of current->state @@ -144,8 +163,8 @@ struct task_group; * * The above is typically ordered against the wakeup, which does: * - * need_sleep = false; - * wake_up_state(p, TASK_UNINTERRUPTIBLE); + * need_sleep = false; + * wake_up_state(p, TASK_UNINTERRUPTIBLE); * * Where wake_up_state() (and all other wakeup primitives) imply enough * barriers to order the store of the variable against wakeup. @@ -154,12 +173,33 @@ struct task_group; * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING). * - * This is obviously fine, since they both store the exact same value. + * However, with slightly different timing the wakeup TASK_RUNNING store can + * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not + * a problem either because that will result in one extra go around the loop + * and our @cond test will save the day. * * Also see the comments of try_to_wake_up(). */ -#define __set_current_state(state_value) do { current->state = (state_value); } while (0) -#define set_current_state(state_value) smp_store_mb(current->state, (state_value)) +#define __set_current_state(state_value) \ + current->state = (state_value) + +#define set_current_state(state_value) \ + smp_store_mb(current->state, (state_value)) + +/* + * set_special_state() should be used for those states when the blocking task + * can not use the regular condition based wait-loop. In that case we must + * serialize against wakeups such that any possible in-flight TASK_RUNNING stores + * will not collide with our state change. 
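(A minimal illustration of the two patterns the comment above contrasts; the wait loop, the condition flag and both function names below are hypothetical sketches, not part of this patch.)

#include <linux/sched.h>

/* Normal case: a condition-based wait loop. A racing TASK_RUNNING store
 * from a wakeup is harmless because the loop simply re-tests the flag. */
static void example_wait_for(bool *flag)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (READ_ONCE(*flag))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}

/* Special case: there is no retry loop, so the state store itself must be
 * serialized against wakeups; that is what the ->pi_lock section inside
 * set_special_state() provides. (Simplified; real callers such as
 * kernel_signal_stop() also hold their own locks.) */
static void example_stop(void)
{
	set_special_state(TASK_STOPPED);
	schedule();
}

The kernel_signal_stop() hunk further below is the in-tree counterpart of the second pattern.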
+ */ +#define set_special_state(state_value) \ + do { \ + unsigned long flags; /* may shadow */ \ + raw_spin_lock_irqsave(&current->pi_lock, flags); \ + current->state = (state_value); \ + raw_spin_unlock_irqrestore(&current->pi_lock, flags); \ + } while (0) + #endif /* Task command name length: */ diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index a7ce74c74e49..113d1ad1ced7 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -280,7 +280,7 @@ static inline void kernel_signal_stop(void) { spin_lock_irq(&current->sighand->siglock); if (current->jobctl & JOBCTL_STOP_DEQUEUED) - __set_current_state(TASK_STOPPED); + set_special_state(TASK_STOPPED); spin_unlock_irq(&current->sighand->siglock); schedule(); diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 1d356105f25a..b4c9fda9d833 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -351,10 +351,10 @@ struct earlycon_id { char name[16]; char compatible[128]; int (*setup)(struct earlycon_device *, const char *options); -} __aligned(32); +}; -extern const struct earlycon_id __earlycon_table[]; -extern const struct earlycon_id __earlycon_table_end[]; +extern const struct earlycon_id *__earlycon_table[]; +extern const struct earlycon_id *__earlycon_table_end[]; #if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE) #define EARLYCON_USED_OR_UNUSED __used @@ -362,12 +362,19 @@ extern const struct earlycon_id __earlycon_table_end[]; #define EARLYCON_USED_OR_UNUSED __maybe_unused #endif -#define OF_EARLYCON_DECLARE(_name, compat, fn) \ - static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \ - EARLYCON_USED_OR_UNUSED __section(__earlycon_table) \ +#define _OF_EARLYCON_DECLARE(_name, compat, fn, unique_id) \ + static const struct earlycon_id unique_id \ + EARLYCON_USED_OR_UNUSED __initconst \ = { .name = __stringify(_name), \ .compatible = compat, \ - .setup = fn } + .setup = fn }; \ + static const struct earlycon_id EARLYCON_USED_OR_UNUSED \ + __section(__earlycon_table) \ + * const __PASTE(__p, unique_id) = &unique_id + +#define OF_EARLYCON_DECLARE(_name, compat, fn) \ + _OF_EARLYCON_DECLARE(_name, compat, fn, \ + __UNIQUE_ID(__earlycon_##_name)) #define EARLYCON_DECLARE(_name, fn) OF_EARLYCON_DECLARE(_name, "", fn) diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h index 388ff2936a87..6794490f25b2 100644 --- a/include/linux/shrinker.h +++ b/include/linux/shrinker.h @@ -75,6 +75,9 @@ struct shrinker { #define SHRINKER_NUMA_AWARE (1 << 0) #define SHRINKER_MEMCG_AWARE (1 << 1) -extern int register_shrinker(struct shrinker *); -extern void unregister_shrinker(struct shrinker *); +extern int prealloc_shrinker(struct shrinker *shrinker); +extern void register_shrinker_prepared(struct shrinker *shrinker); +extern int register_shrinker(struct shrinker *shrinker); +extern void unregister_shrinker(struct shrinker *shrinker); +extern void free_prealloced_shrinker(struct shrinker *shrinker); #endif diff --git a/include/linux/stringhash.h b/include/linux/stringhash.h index e8f0f852968f..c0c5c5b73dc0 100644 --- a/include/linux/stringhash.h +++ b/include/linux/stringhash.h @@ -50,9 +50,9 @@ partial_name_hash(unsigned long c, unsigned long prevhash) * losing bits). This also has the property (wanted by the dcache) * that the msbits make a good hash table index. 
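(Referring back to the shrinker.h hunk just above: registration is now split into a preallocation step and a later publish step. The sketch below shows the intended calling order; my_count, my_scan and my_setup_rest are hypothetical names, not part of this patch.)

#include <linux/shrinker.h>

static unsigned long my_count(struct shrinker *s, struct shrink_control *sc);
static unsigned long my_scan(struct shrinker *s, struct shrink_control *sc);
static int my_setup_rest(void);

static struct shrinker my_shrinker = {
	.count_objects = my_count,
	.scan_objects  = my_scan,
	.seeks         = DEFAULT_SEEKS,
};

static int my_init(void)
{
	/* Allocate the shrinker's internal bookkeeping while failure is
	 * still easy to unwind, without exposing it to reclaim yet. */
	int err = prealloc_shrinker(&my_shrinker);

	if (err)
		return err;

	err = my_setup_rest();
	if (err) {
		/* Never published, so only the preallocation is undone. */
		free_prealloced_shrinker(&my_shrinker);
		return err;
	}

	/* Fully initialized: now make it visible to reclaim. */
	register_shrinker_prepared(&my_shrinker);
	return 0;
}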
*/ -static inline unsigned long end_name_hash(unsigned long hash) +static inline unsigned int end_name_hash(unsigned long hash) { - return __hash_32((unsigned int)hash); + return hash_long(hash, 32); } /* diff --git a/include/linux/textsearch.h b/include/linux/textsearch.h index 0494db3fd9e8..13770cfe33ad 100644 --- a/include/linux/textsearch.h +++ b/include/linux/textsearch.h @@ -62,7 +62,7 @@ struct ts_config int flags; /** - * get_next_block - fetch next block of data + * @get_next_block: fetch next block of data * @consumed: number of bytes consumed by the caller * @dst: destination buffer * @conf: search configuration @@ -79,7 +79,7 @@ struct ts_config struct ts_state *state); /** - * finish - finalize/clean a series of get_next_block() calls + * @finish: finalize/clean a series of get_next_block() calls * @conf: search configuration * @state: search state * diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 34f053a150a9..cf2862bd134a 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -43,11 +43,7 @@ enum { #define THREAD_ALIGN THREAD_SIZE #endif -#if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK) -# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO) -#else -# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT) -#endif +#define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO) /* * flag set/clear/test wrappers diff --git a/include/linux/ti-emif-sram.h b/include/linux/ti-emif-sram.h index 45bc6b376492..53604b087f2c 100644 --- a/include/linux/ti-emif-sram.h +++ b/include/linux/ti-emif-sram.h @@ -60,6 +60,81 @@ struct ti_emif_pm_functions { u32 abort_sr; } __packed __aligned(8); +static inline void ti_emif_asm_offsets(void) +{ + DEFINE(EMIF_SDCFG_VAL_OFFSET, + offsetof(struct emif_regs_amx3, emif_sdcfg_val)); + DEFINE(EMIF_TIMING1_VAL_OFFSET, + offsetof(struct emif_regs_amx3, emif_timing1_val)); + DEFINE(EMIF_TIMING2_VAL_OFFSET, + offsetof(struct emif_regs_amx3, emif_timing2_val)); + DEFINE(EMIF_TIMING3_VAL_OFFSET, + offsetof(struct emif_regs_amx3, emif_timing3_val)); + DEFINE(EMIF_REF_CTRL_VAL_OFFSET, + offsetof(struct emif_regs_amx3, emif_ref_ctrl_val)); + DEFINE(EMIF_ZQCFG_VAL_OFFSET, + offsetof(struct emif_regs_amx3, emif_zqcfg_val)); + DEFINE(EMIF_PMCR_VAL_OFFSET, + offsetof(struct emif_regs_amx3, emif_pmcr_val)); + DEFINE(EMIF_PMCR_SHDW_VAL_OFFSET, + offsetof(struct emif_regs_amx3, emif_pmcr_shdw_val)); + DEFINE(EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET, + offsetof(struct emif_regs_amx3, emif_rd_wr_level_ramp_ctrl)); + DEFINE(EMIF_RD_WR_EXEC_THRESH_OFFSET, + offsetof(struct emif_regs_amx3, emif_rd_wr_exec_thresh)); + DEFINE(EMIF_COS_CONFIG_OFFSET, + offsetof(struct emif_regs_amx3, emif_cos_config)); + DEFINE(EMIF_PRIORITY_TO_COS_MAPPING_OFFSET, + offsetof(struct emif_regs_amx3, emif_priority_to_cos_mapping)); + DEFINE(EMIF_CONNECT_ID_SERV_1_MAP_OFFSET, + offsetof(struct emif_regs_amx3, emif_connect_id_serv_1_map)); + DEFINE(EMIF_CONNECT_ID_SERV_2_MAP_OFFSET, + offsetof(struct emif_regs_amx3, emif_connect_id_serv_2_map)); + DEFINE(EMIF_OCP_CONFIG_VAL_OFFSET, + offsetof(struct emif_regs_amx3, emif_ocp_config_val)); + DEFINE(EMIF_LPDDR2_NVM_TIM_OFFSET, + offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim)); + DEFINE(EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET, + offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim_shdw)); + DEFINE(EMIF_DLL_CALIB_CTRL_VAL_OFFSET, + offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val)); + DEFINE(EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET, + offsetof(struct emif_regs_amx3, 
emif_dll_calib_ctrl_val_shdw)); + DEFINE(EMIF_DDR_PHY_CTLR_1_OFFSET, + offsetof(struct emif_regs_amx3, emif_ddr_phy_ctlr_1)); + DEFINE(EMIF_EXT_PHY_CTRL_VALS_OFFSET, + offsetof(struct emif_regs_amx3, emif_ext_phy_ctrl_vals)); + DEFINE(EMIF_REGS_AMX3_SIZE, sizeof(struct emif_regs_amx3)); + + BLANK(); + + DEFINE(EMIF_PM_BASE_ADDR_VIRT_OFFSET, + offsetof(struct ti_emif_pm_data, ti_emif_base_addr_virt)); + DEFINE(EMIF_PM_BASE_ADDR_PHYS_OFFSET, + offsetof(struct ti_emif_pm_data, ti_emif_base_addr_phys)); + DEFINE(EMIF_PM_CONFIG_OFFSET, + offsetof(struct ti_emif_pm_data, ti_emif_sram_config)); + DEFINE(EMIF_PM_REGS_VIRT_OFFSET, + offsetof(struct ti_emif_pm_data, regs_virt)); + DEFINE(EMIF_PM_REGS_PHYS_OFFSET, + offsetof(struct ti_emif_pm_data, regs_phys)); + DEFINE(EMIF_PM_DATA_SIZE, sizeof(struct ti_emif_pm_data)); + + BLANK(); + + DEFINE(EMIF_PM_SAVE_CONTEXT_OFFSET, + offsetof(struct ti_emif_pm_functions, save_context)); + DEFINE(EMIF_PM_RESTORE_CONTEXT_OFFSET, + offsetof(struct ti_emif_pm_functions, restore_context)); + DEFINE(EMIF_PM_ENTER_SR_OFFSET, + offsetof(struct ti_emif_pm_functions, enter_sr)); + DEFINE(EMIF_PM_EXIT_SR_OFFSET, + offsetof(struct ti_emif_pm_functions, exit_sr)); + DEFINE(EMIF_PM_ABORT_SR_OFFSET, + offsetof(struct ti_emif_pm_functions, abort_sr)); + DEFINE(EMIF_PM_FUNCTIONS_SIZE, sizeof(struct ti_emif_pm_functions)); +} + struct gen_pool; int ti_emif_copy_pm_function_table(struct gen_pool *sram_pool, void *dst); diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index 4b3dca173e89..7acb953298a7 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -52,7 +52,6 @@ struct tk_read_base { * @offs_real: Offset clock monotonic -> clock realtime * @offs_boot: Offset clock monotonic -> clock boottime * @offs_tai: Offset clock monotonic -> clock tai - * @time_suspended: Accumulated suspend time * @tai_offset: The current UTC to TAI offset in seconds * @clock_was_set_seq: The sequence number of clock was set events * @cs_was_changed_seq: The sequence number of clocksource change events @@ -95,7 +94,6 @@ struct timekeeper { ktime_t offs_real; ktime_t offs_boot; ktime_t offs_tai; - ktime_t time_suspended; s32 tai_offset; unsigned int clock_was_set_seq; u8 cs_was_changed_seq; diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index 9737fbec7019..588a0e4b1ab9 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -33,25 +33,20 @@ extern void ktime_get_ts64(struct timespec64 *ts); extern time64_t ktime_get_seconds(void); extern time64_t __ktime_get_real_seconds(void); extern time64_t ktime_get_real_seconds(void); -extern void ktime_get_active_ts64(struct timespec64 *ts); extern int __getnstimeofday64(struct timespec64 *tv); extern void getnstimeofday64(struct timespec64 *tv); extern void getboottime64(struct timespec64 *ts); -#define ktime_get_real_ts64(ts) getnstimeofday64(ts) - -/* Clock BOOTTIME compatibility wrappers */ -static inline void get_monotonic_boottime64(struct timespec64 *ts) -{ - ktime_get_ts64(ts); -} +#define ktime_get_real_ts64(ts) getnstimeofday64(ts) /* * ktime_t based interfaces */ + enum tk_offsets { TK_OFFS_REAL, + TK_OFFS_BOOT, TK_OFFS_TAI, TK_OFFS_MAX, }; @@ -62,10 +57,6 @@ extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs); extern ktime_t ktime_get_raw(void); extern u32 ktime_get_resolution_ns(void); -/* Clock BOOTTIME compatibility wrappers */ -static inline ktime_t ktime_get_boottime(void) { return ktime_get(); } -static 
inline u64 ktime_get_boot_ns(void) { return ktime_get(); } - /** * ktime_get_real - get the real (wall-) time in ktime_t format */ @@ -75,6 +66,17 @@ static inline ktime_t ktime_get_real(void) } /** + * ktime_get_boottime - Returns monotonic time since boot in ktime_t format + * + * This is similar to CLOCK_MONTONIC/ktime_get, but also includes the + * time spent in suspend. + */ +static inline ktime_t ktime_get_boottime(void) +{ + return ktime_get_with_offset(TK_OFFS_BOOT); +} + +/** * ktime_get_clocktai - Returns the TAI time of day in ktime_t format */ static inline ktime_t ktime_get_clocktai(void) @@ -100,6 +102,11 @@ static inline u64 ktime_get_real_ns(void) return ktime_to_ns(ktime_get_real()); } +static inline u64 ktime_get_boot_ns(void) +{ + return ktime_to_ns(ktime_get_boottime()); +} + static inline u64 ktime_get_tai_ns(void) { return ktime_to_ns(ktime_get_clocktai()); @@ -112,11 +119,17 @@ static inline u64 ktime_get_raw_ns(void) extern u64 ktime_get_mono_fast_ns(void); extern u64 ktime_get_raw_fast_ns(void); +extern u64 ktime_get_boot_fast_ns(void); extern u64 ktime_get_real_fast_ns(void); /* * timespec64 interfaces utilizing the ktime based ones */ +static inline void get_monotonic_boottime64(struct timespec64 *ts) +{ + *ts = ktime_to_timespec64(ktime_get_boottime()); +} + static inline void timekeeping_clocktai64(struct timespec64 *ts) { *ts = ktime_to_timespec64(ktime_get_clocktai()); diff --git a/include/linux/timekeeping32.h b/include/linux/timekeeping32.h index af4114d5dc17..3616b4becb59 100644 --- a/include/linux/timekeeping32.h +++ b/include/linux/timekeeping32.h @@ -9,9 +9,6 @@ extern void do_gettimeofday(struct timeval *tv); unsigned long get_seconds(void); -/* does not take xtime_lock */ -struct timespec __current_kernel_time(void); - static inline struct timespec current_kernel_time(void) { struct timespec64 now = current_kernel_time64(); diff --git a/include/linux/timer.h b/include/linux/timer.h index 2448f9cc48a3..7b066fd38248 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -8,8 +8,6 @@ #include <linux/debugobjects.h> #include <linux/stringify.h> -struct tvec_base; - struct timer_list { /* * All fields that change during normal runtime grouped to the diff --git a/include/linux/tty.h b/include/linux/tty.h index 47f8af22f216..1dd587ba6d88 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -701,7 +701,7 @@ extern int tty_unregister_ldisc(int disc); extern int tty_set_ldisc(struct tty_struct *tty, int disc); extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty); extern void tty_ldisc_release(struct tty_struct *tty); -extern void tty_ldisc_init(struct tty_struct *tty); +extern int __must_check tty_ldisc_init(struct tty_struct *tty); extern void tty_ldisc_deinit(struct tty_struct *tty); extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p, char *f, int count); diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 4b6b9283fa7b..8675e145ea8b 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -52,7 +52,7 @@ #define USB_GADGET_DELAYED_STATUS 0x7fff /* Impossibly large value */ /* big enough to hold our biggest descriptor */ -#define USB_COMP_EP0_BUFSIZ 1024 +#define USB_COMP_EP0_BUFSIZ 4096 /* OS feature descriptor length <= 4kB */ #define USB_COMP_EP0_OS_DESC_BUFSIZ 4096 diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h index c71def6b310f..a240ed2a0372 100644 --- a/include/linux/vbox_utils.h +++ 
b/include/linux/vbox_utils.h @@ -24,24 +24,6 @@ __printf(1, 2) void vbg_debug(const char *fmt, ...); #define vbg_debug pr_debug #endif -/** - * Allocate memory for generic request and initialize the request header. - * - * Return: the allocated memory - * @len: Size of memory block required for the request. - * @req_type: The generic request type. - */ -void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type); - -/** - * Perform a generic request. - * - * Return: VBox status code - * @gdev: The Guest extension device. - * @req: Pointer to the request structure. - */ -int vbg_req_perform(struct vbg_dev *gdev, void *req); - int vbg_hgcm_connect(struct vbg_dev *gdev, struct vmmdev_hgcm_service_location *loc, u32 *client_id, int *vbox_status); @@ -52,11 +34,6 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms, u32 parm_count, int *vbox_status); -int vbg_hgcm_call32( - struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms, - struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count, - int *vbox_status); - /** * Convert a VirtualBox status code to a standard Linux kernel return value. * Return: 0 or negative errno value. diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 988c7355bc22..fa1b5da2804e 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -157,6 +157,9 @@ int virtio_device_freeze(struct virtio_device *dev); int virtio_device_restore(struct virtio_device *dev); #endif +#define virtio_device_for_each_vq(vdev, vq) \ + list_for_each_entry(vq, &vdev->vqs, list) + /** * virtio_driver - operations for a virtio I/O driver * @driver: underlying device driver (populate name and owner). diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h index 9318b2166439..2b0072fa5e92 100644 --- a/include/linux/wait_bit.h +++ b/include/linux/wait_bit.h @@ -305,4 +305,21 @@ do { \ __ret; \ }) +/** + * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit + * + * @bit: the bit of the word being waited on + * @word: the word being waited on, a kernel virtual address + * + * You can use this helper if bitflags are manipulated atomically rather than + * non-atomically under a lock. + */ +static inline void clear_and_wake_up_bit(int bit, void *word) +{ + clear_bit_unlock(bit, word); + /* See wake_up_bit() for which memory barrier you need to use. 
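(A usage sketch for the clear_and_wake_up_bit() helper added in this wait_bit.h hunk; the flag word, bit number and function names are invented for illustration only.)

#include <linux/wait_bit.h>

#define MY_FLAG_BUSY	0		/* hypothetical bit number */

static unsigned long my_flags;

static void my_wait_until_idle(void)
{
	/* Sleeps until MY_FLAG_BUSY is clear. */
	wait_on_bit(&my_flags, MY_FLAG_BUSY, TASK_UNINTERRUPTIBLE);
}

static void my_mark_idle(void)
{
	/* clear_bit_unlock() plus the required barrier plus wake_up_bit(),
	 * all in one call, so waiters cannot miss the transition. */
	clear_and_wake_up_bit(MY_FLAG_BUSY, &my_flags);
}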
*/ + smp_mb__after_atomic(); + wake_up_bit(word, bit); +} + #endif /* _LINUX_WAIT_BIT_H */ diff --git a/include/media/i2c/tvp7002.h b/include/media/i2c/tvp7002.h index 5ee007c1cead..cb213c136089 100644 --- a/include/media/i2c/tvp7002.h +++ b/include/media/i2c/tvp7002.h @@ -5,7 +5,7 @@ * Author: Santiago Nunez-Corrales <santiago.nunez@ridgerun.com> * * This code is partially based upon the TVP5150 driver - * written by Mauro Carvalho Chehab (mchehab@infradead.org), + * written by Mauro Carvalho Chehab <mchehab@kernel.org>, * the TVP514x driver written by Vaibhav Hiremath <hvaibhav@ti.com> * and the TVP7002 driver in the TI LSP 2.10.00.14 * diff --git a/include/media/videobuf-core.h b/include/media/videobuf-core.h index 0bda0adc744f..60a664febba0 100644 --- a/include/media/videobuf-core.h +++ b/include/media/videobuf-core.h @@ -1,11 +1,11 @@ /* * generic helper functions for handling video4linux capture buffers * - * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org> + * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org> * * Highly based on video-buf written originally by: * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org> - * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org> + * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org> * (c) 2006 Ted Walther and John Sokol * * This program is free software; you can redistribute it and/or modify diff --git a/include/media/videobuf-dma-sg.h b/include/media/videobuf-dma-sg.h index d8b27854e3bf..01bd142b979d 100644 --- a/include/media/videobuf-dma-sg.h +++ b/include/media/videobuf-dma-sg.h @@ -6,11 +6,11 @@ * into PAGE_SIZE chunks). They also assume the driver does not need * to touch the video data. * - * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org> + * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org> * * Highly based on video-buf written originally by: * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org> - * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org> + * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org> * (c) 2006 Ted Walther and John Sokol * * This program is free software; you can redistribute it and/or modify diff --git a/include/media/videobuf-vmalloc.h b/include/media/videobuf-vmalloc.h index 486a97efdb56..36c6a4ad3504 100644 --- a/include/media/videobuf-vmalloc.h +++ b/include/media/videobuf-vmalloc.h @@ -6,7 +6,7 @@ * into PAGE_SIZE chunks). They also assume the driver does not need * to touch the video data. * - * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org> + * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/include/net/bonding.h b/include/net/bonding.h index f801fc940b29..b52235158836 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -198,6 +198,7 @@ struct bonding { struct slave __rcu *primary_slave; struct bond_up_slave __rcu *slave_arr; /* Array of usable slaves */ bool force_primary; + u32 nest_level; s32 slave_cnt; /* never change this value outside the attach/detach wrappers */ int (*recv_probe)(const struct sk_buff *, struct bonding *, struct slave *); diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 9a074776f70b..d1fcf2442a42 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -251,7 +251,7 @@ extern struct flow_dissector flow_keys_buf_dissector; * This structure is used to hold a digest of the full flow keys. 
This is a * larger "hash" of a flow to allow definitively matching specific flows where * the 32 bit skb->hash is not large enough. The size is limited to 16 bytes so - * that it can by used in CB of skb (see sch_choke for an example). + * that it can be used in CB of skb (see sch_choke for an example). */ #define FLOW_KEYS_DIGEST_LEN 16 struct flow_keys_digest { diff --git a/include/net/ife.h b/include/net/ife.h index 44b9c00f7223..e117617e3c34 100644 --- a/include/net/ife.h +++ b/include/net/ife.h @@ -12,7 +12,8 @@ void *ife_encode(struct sk_buff *skb, u16 metalen); void *ife_decode(struct sk_buff *skb, u16 *metalen); -void *ife_tlv_meta_decode(void *skbdata, u16 *attrtype, u16 *dlen, u16 *totlen); +void *ife_tlv_meta_decode(void *skbdata, const void *ifehdr_end, u16 *attrtype, + u16 *dlen, u16 *totlen); int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval); diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h index 5c40f118c0fa..df528a623548 100644 --- a/include/net/llc_conn.h +++ b/include/net/llc_conn.h @@ -97,6 +97,7 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb) struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, struct proto *prot, int kern); +void llc_sk_stop_all_timers(struct sock *sk, bool sync); void llc_sk_free(struct sock *sk); void llc_sk_reset(struct sock *sk); diff --git a/include/net/mac80211.h b/include/net/mac80211.h index d2279b2d61aa..b2f3a0c018e7 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -2080,7 +2080,7 @@ struct ieee80211_txq { * virtual interface might not be given air time for the transmission of * the frame, as it is not synced with the AP/P2P GO yet, and thus the * deauthentication frame might not be transmitted. - > + * * @IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP: The driver (or firmware) doesn't * support QoS NDP for AP probing - that's most likely a driver bug. 
* diff --git a/include/net/tls.h b/include/net/tls.h index 3da8e13a6d96..b400d0bb7448 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -148,6 +148,7 @@ struct tls_context { struct scatterlist *partially_sent_record; u16 partially_sent_offset; unsigned long flags; + bool in_tcp_sendpages; u16 pending_open_record_frags; int (*push_pending_record)(struct sock *sk, int flags); diff --git a/include/net/xfrm.h b/include/net/xfrm.h index a872379b69da..45e75c36b738 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -375,6 +375,7 @@ struct xfrm_input_afinfo { int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo); int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo); +void xfrm_flush_gc(void); void xfrm_state_delete_tunnel(struct xfrm_state *x); struct xfrm_type { diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h index 50df5b28d2c9..8ee8991aa099 100644 --- a/include/soc/bcm2835/raspberrypi-firmware.h +++ b/include/soc/bcm2835/raspberrypi-firmware.h @@ -143,13 +143,13 @@ struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node); static inline int rpi_firmware_property(struct rpi_firmware *fw, u32 tag, void *data, size_t len) { - return 0; + return -ENOSYS; } static inline int rpi_firmware_property_list(struct rpi_firmware *fw, void *data, size_t tag_size) { - return 0; + return -ENOSYS; } static inline struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node) diff --git a/include/sound/control.h b/include/sound/control.h index ca13a44ae9d4..6011a58d3e20 100644 --- a/include/sound/control.h +++ b/include/sound/control.h @@ -23,6 +23,7 @@ */ #include <linux/wait.h> +#include <linux/nospec.h> #include <sound/asound.h> #define snd_kcontrol_chip(kcontrol) ((kcontrol)->private_data) @@ -148,12 +149,14 @@ int snd_ctl_get_preferred_subdevice(struct snd_card *card, int type); static inline unsigned int snd_ctl_get_ioffnum(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id) { - return id->numid - kctl->id.numid; + unsigned int ioff = id->numid - kctl->id.numid; + return array_index_nospec(ioff, kctl->count); } static inline unsigned int snd_ctl_get_ioffidx(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id) { - return id->index - kctl->id.index; + unsigned int ioff = id->index - kctl->id.index; + return array_index_nospec(ioff, kctl->count); } static inline unsigned int snd_ctl_get_ioff(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id) diff --git a/include/trace/events/initcall.h b/include/trace/events/initcall.h index 8d6cf10d27c9..eb903c3f195f 100644 --- a/include/trace/events/initcall.h +++ b/include/trace/events/initcall.h @@ -31,7 +31,11 @@ TRACE_EVENT(initcall_start, TP_ARGS(func), TP_STRUCT__entry( - __field(initcall_t, func) + /* + * Use field_struct to avoid is_signed_type() + * comparison of a function pointer + */ + __field_struct(initcall_t, func) ), TP_fast_assign( @@ -48,8 +52,12 @@ TRACE_EVENT(initcall_finish, TP_ARGS(func, ret), TP_STRUCT__entry( - __field(initcall_t, func) - __field(int, ret) + /* + * Use field_struct to avoid is_signed_type() + * comparison of a function pointer + */ + __field_struct(initcall_t, func) + __field(int, ret) ), TP_fast_assign( diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 9e96c2fe2793..077e664ac9a2 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -15,6 +15,7 @@ #define _TRACE_RXRPC_H #include <linux/tracepoint.h> +#include <linux/errqueue.h> /* * Define 
enums for tracing information. @@ -210,6 +211,20 @@ enum rxrpc_congest_change { rxrpc_cong_saw_nack, }; +enum rxrpc_tx_fail_trace { + rxrpc_tx_fail_call_abort, + rxrpc_tx_fail_call_ack, + rxrpc_tx_fail_call_data_frag, + rxrpc_tx_fail_call_data_nofrag, + rxrpc_tx_fail_call_final_resend, + rxrpc_tx_fail_conn_abort, + rxrpc_tx_fail_conn_challenge, + rxrpc_tx_fail_conn_response, + rxrpc_tx_fail_reject, + rxrpc_tx_fail_version_keepalive, + rxrpc_tx_fail_version_reply, +}; + #endif /* end __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY */ /* @@ -437,6 +452,19 @@ enum rxrpc_congest_change { EM(RXRPC_CALL_LOCAL_ERROR, "LocalError") \ E_(RXRPC_CALL_NETWORK_ERROR, "NetError") +#define rxrpc_tx_fail_traces \ + EM(rxrpc_tx_fail_call_abort, "CallAbort") \ + EM(rxrpc_tx_fail_call_ack, "CallAck") \ + EM(rxrpc_tx_fail_call_data_frag, "CallDataFrag") \ + EM(rxrpc_tx_fail_call_data_nofrag, "CallDataNofrag") \ + EM(rxrpc_tx_fail_call_final_resend, "CallFinalResend") \ + EM(rxrpc_tx_fail_conn_abort, "ConnAbort") \ + EM(rxrpc_tx_fail_conn_challenge, "ConnChall") \ + EM(rxrpc_tx_fail_conn_response, "ConnResp") \ + EM(rxrpc_tx_fail_reject, "Reject") \ + EM(rxrpc_tx_fail_version_keepalive, "VerKeepalive") \ + E_(rxrpc_tx_fail_version_reply, "VerReply") + /* * Export enum symbols via userspace. */ @@ -460,6 +488,7 @@ rxrpc_propose_ack_traces; rxrpc_propose_ack_outcomes; rxrpc_congest_modes; rxrpc_congest_changes; +rxrpc_tx_fail_traces; /* * Now redefine the EM() and E_() macros to map the enums to the strings that @@ -1374,6 +1403,62 @@ TRACE_EVENT(rxrpc_resend, __entry->anno) ); +TRACE_EVENT(rxrpc_rx_icmp, + TP_PROTO(struct rxrpc_peer *peer, struct sock_extended_err *ee, + struct sockaddr_rxrpc *srx), + + TP_ARGS(peer, ee, srx), + + TP_STRUCT__entry( + __field(unsigned int, peer ) + __field_struct(struct sock_extended_err, ee ) + __field_struct(struct sockaddr_rxrpc, srx ) + ), + + TP_fast_assign( + __entry->peer = peer->debug_id; + memcpy(&__entry->ee, ee, sizeof(__entry->ee)); + memcpy(&__entry->srx, srx, sizeof(__entry->srx)); + ), + + TP_printk("P=%08x o=%u t=%u c=%u i=%u d=%u e=%d %pISp", + __entry->peer, + __entry->ee.ee_origin, + __entry->ee.ee_type, + __entry->ee.ee_code, + __entry->ee.ee_info, + __entry->ee.ee_data, + __entry->ee.ee_errno, + &__entry->srx.transport) + ); + +TRACE_EVENT(rxrpc_tx_fail, + TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial, int ret, + enum rxrpc_tx_fail_trace what), + + TP_ARGS(debug_id, serial, ret, what), + + TP_STRUCT__entry( + __field(unsigned int, debug_id ) + __field(rxrpc_serial_t, serial ) + __field(int, ret ) + __field(enum rxrpc_tx_fail_trace, what ) + ), + + TP_fast_assign( + __entry->debug_id = debug_id; + __entry->serial = serial; + __entry->ret = ret; + __entry->what = what; + ), + + TP_printk("c=%08x r=%x ret=%d %s", + __entry->debug_id, + __entry->serial, + __entry->ret, + __print_symbolic(__entry->what, rxrpc_tx_fail_traces)) + ); + #endif /* _TRACE_RXRPC_H */ /* This part must be outside protection */ diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 335d87242439..bbb08a3ef5cc 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -224,6 +224,8 @@ TRACE_EVENT(rpc_stats_latency, TP_ARGS(task, backlog, rtt, execute), TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) __field(u32, xid) __field(int, version) __string(progname, task->tk_client->cl_program->name) @@ -231,13 +233,11 @@ TRACE_EVENT(rpc_stats_latency, __field(unsigned long, backlog) __field(unsigned long, rtt) 
__field(unsigned long, execute) - __string(addr, - task->tk_xprt->address_strings[RPC_DISPLAY_ADDR]) - __string(port, - task->tk_xprt->address_strings[RPC_DISPLAY_PORT]) ), TP_fast_assign( + __entry->client_id = task->tk_client->cl_clid; + __entry->task_id = task->tk_pid; __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid); __entry->version = task->tk_client->cl_vers; __assign_str(progname, task->tk_client->cl_program->name) @@ -245,14 +245,10 @@ TRACE_EVENT(rpc_stats_latency, __entry->backlog = ktime_to_us(backlog); __entry->rtt = ktime_to_us(rtt); __entry->execute = ktime_to_us(execute); - __assign_str(addr, - task->tk_xprt->address_strings[RPC_DISPLAY_ADDR]); - __assign_str(port, - task->tk_xprt->address_strings[RPC_DISPLAY_PORT]); ), - TP_printk("peer=[%s]:%s xid=0x%08x %sv%d %s backlog=%lu rtt=%lu execute=%lu", - __get_str(addr), __get_str(port), __entry->xid, + TP_printk("task:%u@%d xid=0x%08x %sv%d %s backlog=%lu rtt=%lu execute=%lu", + __entry->task_id, __entry->client_id, __entry->xid, __get_str(progname), __entry->version, __get_str(procname), __entry->backlog, __entry->rtt, __entry->execute) ); diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h index 2f057a494d93..9a761bc6a251 100644 --- a/include/trace/events/workqueue.h +++ b/include/trace/events/workqueue.h @@ -25,6 +25,8 @@ DECLARE_EVENT_CLASS(workqueue_work, TP_printk("work struct %p", __entry->work) ); +struct pool_workqueue; + /** * workqueue_queue_work - called when a work gets queued * @req_cpu: the requested cpu diff --git a/include/uapi/linux/if_infiniband.h b/include/uapi/linux/if_infiniband.h index 050b92dcf8cf..0fc33bf30e45 100644 --- a/include/uapi/linux/if_infiniband.h +++ b/include/uapi/linux/if_infiniband.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */ /* * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 1065006c9bf5..b02c41e53d56 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -676,6 +676,13 @@ struct kvm_ioeventfd { __u8 pad[36]; }; +#define KVM_X86_DISABLE_EXITS_MWAIT (1 << 0) +#define KVM_X86_DISABLE_EXITS_HTL (1 << 1) +#define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2) +#define KVM_X86_DISABLE_VALID_EXITS (KVM_X86_DISABLE_EXITS_MWAIT | \ + KVM_X86_DISABLE_EXITS_HTL | \ + KVM_X86_DISABLE_EXITS_PAUSE) + /* for KVM_ENABLE_CAP */ struct kvm_enable_cap { /* in */ diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 15daf5e2638d..9c3630146cec 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -2698,6 +2698,8 @@ enum nl80211_attrs { #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS #define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS +#define NL80211_WIPHY_NAME_MAXLEN 128 + #define NL80211_MAX_SUPP_RATES 32 #define NL80211_MAX_SUPP_HT_RATES 77 #define NL80211_MAX_SUPP_REG_RULES 64 diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 912b85b52344..b8e288a1f740 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -650,11 +650,23 @@ struct perf_event_mmap_page { #define PERF_RECORD_MISC_COMM_EXEC (1 << 13) #define PERF_RECORD_MISC_SWITCH_OUT (1 << 13) /* - * Indicates that the content of PERF_SAMPLE_IP points to - * the actual instruction that triggered the event. See also - * perf_event_attr::precise_ip. + * These PERF_RECORD_MISC_* flags below are safely reused + * for the following events: + * + * PERF_RECORD_MISC_EXACT_IP - PERF_RECORD_SAMPLE of precise events + * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events + * + * + * PERF_RECORD_MISC_EXACT_IP: + * Indicates that the content of PERF_SAMPLE_IP points to + * the actual instruction that triggered the event. See also + * perf_event_attr::precise_ip. + * + * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT: + * Indicates that thread was preempted in TASK_RUNNING state. */ #define PERF_RECORD_MISC_EXACT_IP (1 << 14) +#define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT (1 << 14) /* * Reserve the last bit to indicate some extended misc field */ diff --git a/include/uapi/linux/random.h b/include/uapi/linux/random.h index c34f4490d025..26ee91300e3e 100644 --- a/include/uapi/linux/random.h +++ b/include/uapi/linux/random.h @@ -35,6 +35,9 @@ /* Clear the entropy pool and associated counters. (Superuser only.) */ #define RNDCLEARPOOL _IO( 'R', 0x06 ) +/* Reseed CRNG. (Superuser only.) */ +#define RNDRESEEDCRNG _IO( 'R', 0x07 ) + struct rand_pool_info { int entropy_count; int buf_size; diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h index a66b213de3d7..20c6bd0b0007 100644 --- a/include/uapi/linux/rds.h +++ b/include/uapi/linux/rds.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */ /* * Copyright (c) 2008 Oracle. All rights reserved. 
* diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h index 0f272818a4d2..6b58371b1f0d 100644 --- a/include/uapi/linux/sysctl.h +++ b/include/uapi/linux/sysctl.h @@ -780,24 +780,6 @@ enum { NET_BRIDGE_NF_FILTER_PPPOE_TAGGED = 5, }; -/* proc/sys/net/irda */ -enum { - NET_IRDA_DISCOVERY=1, - NET_IRDA_DEVNAME=2, - NET_IRDA_DEBUG=3, - NET_IRDA_FAST_POLL=4, - NET_IRDA_DISCOVERY_SLOTS=5, - NET_IRDA_DISCOVERY_TIMEOUT=6, - NET_IRDA_SLOT_TIMEOUT=7, - NET_IRDA_MAX_BAUD_RATE=8, - NET_IRDA_MIN_TX_TURN_TIME=9, - NET_IRDA_MAX_TX_DATA_SIZE=10, - NET_IRDA_MAX_TX_WINDOW=11, - NET_IRDA_MAX_NOREPLY_TIME=12, - NET_IRDA_WARN_NOREPLY_TIME=13, - NET_IRDA_LAP_KEEPALIVE_TIME=14, -}; - /* CTL_FS names: */ enum diff --git a/include/uapi/linux/time.h b/include/uapi/linux/time.h index 16a296612ba4..4c0338ea308a 100644 --- a/include/uapi/linux/time.h +++ b/include/uapi/linux/time.h @@ -73,7 +73,6 @@ struct __kernel_old_timeval { */ #define CLOCK_SGI_CYCLE 10 #define CLOCK_TAI 11 -#define CLOCK_MONOTONIC_ACTIVE 12 #define MAX_CLOCKS 16 #define CLOCKS_MASK (CLOCK_REALTIME | CLOCK_MONOTONIC) diff --git a/include/uapi/linux/tls.h b/include/uapi/linux/tls.h index c6633e97eca4..ff02287495ac 100644 --- a/include/uapi/linux/tls.h +++ b/include/uapi/linux/tls.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */ /* * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved. * diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h index 40297a3181ed..13b8cb563892 100644 --- a/include/uapi/linux/virtio_balloon.h +++ b/include/uapi/linux/virtio_balloon.h @@ -57,6 +57,21 @@ struct virtio_balloon_config { #define VIRTIO_BALLOON_S_HTLB_PGFAIL 9 /* Hugetlb page allocation failures */ #define VIRTIO_BALLOON_S_NR 10 +#define VIRTIO_BALLOON_S_NAMES_WITH_PREFIX(VIRTIO_BALLOON_S_NAMES_prefix) { \ + VIRTIO_BALLOON_S_NAMES_prefix "swap-in", \ + VIRTIO_BALLOON_S_NAMES_prefix "swap-out", \ + VIRTIO_BALLOON_S_NAMES_prefix "major-faults", \ + VIRTIO_BALLOON_S_NAMES_prefix "minor-faults", \ + VIRTIO_BALLOON_S_NAMES_prefix "free-memory", \ + VIRTIO_BALLOON_S_NAMES_prefix "total-memory", \ + VIRTIO_BALLOON_S_NAMES_prefix "available-memory", \ + VIRTIO_BALLOON_S_NAMES_prefix "disk-caches", \ + VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-allocations", \ + VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-failures" \ +} + +#define VIRTIO_BALLOON_S_NAMES VIRTIO_BALLOON_S_NAMES_WITH_PREFIX("") + /* * Memory statistics structure. * Driver fills an array of these structures and passes to device. diff --git a/include/uapi/rdma/cxgb3-abi.h b/include/uapi/rdma/cxgb3-abi.h index 9acb4b7a6246..85aed672f43e 100644 --- a/include/uapi/rdma/cxgb3-abi.h +++ b/include/uapi/rdma/cxgb3-abi.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */ /* * Copyright (c) 2006 Chelsio, Inc. All rights reserved. * diff --git a/include/uapi/rdma/cxgb4-abi.h b/include/uapi/rdma/cxgb4-abi.h index 1fefd0140c26..a159ba8dcf8f 100644 --- a/include/uapi/rdma/cxgb4-abi.h +++ b/include/uapi/rdma/cxgb4-abi.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */ /* * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. 
* diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h index 7092c8de4bd8..78613b609fa8 100644 --- a/include/uapi/rdma/hns-abi.h +++ b/include/uapi/rdma/hns-abi.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */ /* * Copyright (c) 2016 Hisilicon Limited. * diff --git a/include/uapi/rdma/ib_user_cm.h b/include/uapi/rdma/ib_user_cm.h index 4a8f9562f7cd..e2709bb8cb18 100644 --- a/include/uapi/rdma/ib_user_cm.h +++ b/include/uapi/rdma/ib_user_cm.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */ /* * Copyright (c) 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Intel Corporation. All rights reserved. diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h index 04e46ea517d3..625545d862d7 100644 --- a/include/uapi/rdma/ib_user_ioctl_verbs.h +++ b/include/uapi/rdma/ib_user_ioctl_verbs.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */ /* * Copyright (c) 2017-2018, Mellanox Technologies inc. All rights reserved. * diff --git a/include/uapi/rdma/ib_user_mad.h b/include/uapi/rdma/ib_user_mad.h index ef92118dad97..90c0cf228020 100644 --- a/include/uapi/rdma/ib_user_mad.h +++ b/include/uapi/rdma/ib_user_mad.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Voltaire, Inc. All rights reserved. diff --git a/include/uapi/rdma/ib_user_sa.h b/include/uapi/rdma/ib_user_sa.h index 0d2607f0cd20..435155d6e1c6 100644 --- a/include/uapi/rdma/ib_user_sa.h +++ b/include/uapi/rdma/ib_user_sa.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */ /* * Copyright (c) 2005 Intel Corporation. All rights reserved. * diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index 9be07394fdbe..6aeb03315b0b 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */ /* * Copyright (c) 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. diff --git a/include/uapi/rdma/mlx4-abi.h b/include/uapi/rdma/mlx4-abi.h index 04f64bc4045f..f74557528175 100644 --- a/include/uapi/rdma/mlx4-abi.h +++ b/include/uapi/rdma/mlx4-abi.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */ /* * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. 
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h index cb4a02c4a1ce..fdaf00e20649 100644 --- a/include/uapi/rdma/mlx5-abi.h +++ b/include/uapi/rdma/mlx5-abi.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */ /* * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * diff --git a/include/uapi/rdma/mthca-abi.h b/include/uapi/rdma/mthca-abi.h index ac756cd9e807..91b12e1a6f43 100644 --- a/include/uapi/rdma/mthca-abi.h +++ b/include/uapi/rdma/mthca-abi.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */ /* * Copyright (c) 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. diff --git a/include/uapi/rdma/nes-abi.h b/include/uapi/rdma/nes-abi.h index 35bfd4015d07..f80495baa969 100644 --- a/include/uapi/rdma/nes-abi.h +++ b/include/uapi/rdma/nes-abi.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */ /* * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. * Copyright (c) 2005 Topspin Communications. All rights reserved. diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h index 8ba098900e9a..24c658b3c790 100644 --- a/include/uapi/rdma/qedr-abi.h +++ b/include/uapi/rdma/qedr-abi.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */ /* QLogic qedr NIC Driver * Copyright (c) 2015-2016 QLogic Corporation * diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h index e1269024af47..0d1e78ebad05 100644 --- a/include/uapi/rdma/rdma_user_cm.h +++ b/include/uapi/rdma/rdma_user_cm.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */ /* * Copyright (c) 2005-2006 Intel Corporation. All rights reserved. * diff --git a/include/uapi/rdma/rdma_user_ioctl.h b/include/uapi/rdma/rdma_user_ioctl.h index d223f4164a0f..d92d2721b28c 100644 --- a/include/uapi/rdma/rdma_user_ioctl.h +++ b/include/uapi/rdma/rdma_user_ioctl.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */ /* * Copyright (c) 2016 Mellanox Technologies, LTD. All rights reserved. * diff --git a/include/uapi/rdma/rdma_user_rxe.h b/include/uapi/rdma/rdma_user_rxe.h index 1f8a9e7daea4..44ef6a3b7afc 100644 --- a/include/uapi/rdma/rdma_user_rxe.h +++ b/include/uapi/rdma/rdma_user_rxe.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */ /* * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved. 
* diff --git a/include/xen/interface/io/sndif.h b/include/xen/interface/io/sndif.h index 5c918276835e..78bb5d9f8d83 100644 --- a/include/xen/interface/io/sndif.h +++ b/include/xen/interface/io/sndif.h @@ -38,6 +38,13 @@ /* ****************************************************************************** + * Protocol version + ****************************************************************************** + */ +#define XENSND_PROTOCOL_VERSION 2 + +/* + ****************************************************************************** * Feature and Parameter Negotiation ****************************************************************************** * @@ -106,6 +113,8 @@ * * /local/domain/1/device/vsnd/0/0/0/ring-ref = "386" * /local/domain/1/device/vsnd/0/0/0/event-channel = "15" + * /local/domain/1/device/vsnd/0/0/0/evt-ring-ref = "1386" + * /local/domain/1/device/vsnd/0/0/0/evt-event-channel = "215" * *------------------------------ Stream 1, capture ---------------------------- * @@ -115,6 +124,8 @@ * * /local/domain/1/device/vsnd/0/0/1/ring-ref = "384" * /local/domain/1/device/vsnd/0/0/1/event-channel = "13" + * /local/domain/1/device/vsnd/0/0/1/evt-ring-ref = "1384" + * /local/domain/1/device/vsnd/0/0/1/evt-event-channel = "213" * *------------------------------- PCM device 1 -------------------------------- * @@ -128,6 +139,8 @@ * * /local/domain/1/device/vsnd/0/1/0/ring-ref = "387" * /local/domain/1/device/vsnd/0/1/0/event-channel = "151" + * /local/domain/1/device/vsnd/0/1/0/evt-ring-ref = "1387" + * /local/domain/1/device/vsnd/0/1/0/evt-event-channel = "351" * *------------------------------- PCM device 2 -------------------------------- * @@ -140,6 +153,8 @@ * * /local/domain/1/device/vsnd/0/2/0/ring-ref = "389" * /local/domain/1/device/vsnd/0/2/0/event-channel = "152" + * /local/domain/1/device/vsnd/0/2/0/evt-ring-ref = "1389" + * /local/domain/1/device/vsnd/0/2/0/evt-event-channel = "452" * ****************************************************************************** * Backend XenBus Nodes @@ -285,6 +300,23 @@ * The Xen grant reference granting permission for the backend to map * a sole page in a single page sized ring buffer. * + *--------------------- Stream Event Transport Parameters --------------------- + * + * This communication path is used to deliver asynchronous events from backend + * to frontend, set up per stream. + * + * evt-event-channel + * Values: <uint32_t> + * + * The identifier of the Xen event channel used to signal activity + * in the ring buffer. + * + * evt-ring-ref + * Values: <uint32_t> + * + * The Xen grant reference granting permission for the backend to map + * a sole page in a single page sized ring buffer. 
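/*
 * For illustration only (not part of this protocol): a minimal sketch of a
 * front-side consumer of the per-stream event transport described above.
 * It assumes the event page has already been granted and mapped as "page",
 * relies on struct xensnd_event_page, struct xensnd_evt, XENSND_EVT_CUR_POS
 * and the XENSND_IN_RING_REF() helper defined further down in this header,
 * and uses Linux-style virt_rmb()/virt_wmb() barriers. The
 * snd_front_*_sketch name is hypothetical and error handling is omitted.
 */
static void snd_front_consume_events_sketch(struct xensnd_event_page *page)
{
        uint32_t cons = page->in_cons;
        uint32_t prod = page->in_prod;

        virt_rmb();             /* see in_prod before reading the events */

        while (cons != prod) {
                struct xensnd_evt *evt = &XENSND_IN_RING_REF(page, cons);

                if (evt->type == XENSND_EVT_CUR_POS) {
                        /* evt->op.cur_pos.position: stream position, octets */
                }
                cons++;
        }

        page->in_cons = cons;
        virt_wmb();             /* publish the new consumer index to the backend */
}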
+ * ****************************************************************************** * STATE DIAGRAMS ****************************************************************************** @@ -432,6 +464,20 @@ #define XENSND_OP_GET_VOLUME 5 #define XENSND_OP_MUTE 6 #define XENSND_OP_UNMUTE 7 +#define XENSND_OP_TRIGGER 8 +#define XENSND_OP_HW_PARAM_QUERY 9 + +#define XENSND_OP_TRIGGER_START 0 +#define XENSND_OP_TRIGGER_PAUSE 1 +#define XENSND_OP_TRIGGER_STOP 2 +#define XENSND_OP_TRIGGER_RESUME 3 + +/* + ****************************************************************************** + * EVENT CODES + ****************************************************************************** + */ +#define XENSND_EVT_CUR_POS 0 /* ****************************************************************************** @@ -448,6 +494,8 @@ #define XENSND_FIELD_VCARD_LONG_NAME "long-name" #define XENSND_FIELD_RING_REF "ring-ref" #define XENSND_FIELD_EVT_CHNL "event-channel" +#define XENSND_FIELD_EVT_RING_REF "evt-ring-ref" +#define XENSND_FIELD_EVT_EVT_CHNL "evt-event-channel" #define XENSND_FIELD_DEVICE_NAME "name" #define XENSND_FIELD_TYPE "type" #define XENSND_FIELD_STREAM_UNIQUE_ID "unique-id" @@ -526,7 +574,7 @@ * *---------------------------------- Requests --------------------------------- * - * All request packets have the same length (32 octets) + * All request packets have the same length (64 octets) * All request packets have common header: * 0 1 2 3 octet * +----------------+----------------+----------------+----------------+ @@ -559,11 +607,13 @@ * +----------------+----------------+----------------+----------------+ * | gref_directory | 24 * +----------------+----------------+----------------+----------------+ - * | reserved | 28 + * | period_sz | 28 + * +----------------+----------------+----------------+----------------+ + * | reserved | 32 * +----------------+----------------+----------------+----------------+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| * +----------------+----------------+----------------+----------------+ - * | reserved | 32 + * | reserved | 64 * +----------------+----------------+----------------+----------------+ * * pcm_rate - uint32_t, stream data rate, Hz @@ -571,6 +621,14 @@ * pcm_channels - uint8_t, number of channels of this stream, * [channels-min; channels-max] * buffer_sz - uint32_t, buffer size to be allocated, octets + * period_sz - uint32_t, event period size, octets + * This is the requested value of the period at which frontend would + * like to receive XENSND_EVT_CUR_POS notifications from the backend when + * stream position advances during playback/capture. + * It shows how many octets are expected to be played/captured before + * sending such an event. + * If set to 0 no XENSND_EVT_CUR_POS events are sent by the backend. + * * gref_directory - grant_ref_t, a reference to the first shared page * describing shared buffer references. At least one page exists. 
If shared * buffer size (buffer_sz) exceeds what can be addressed by this single page, @@ -585,6 +643,7 @@ struct xensnd_open_req { uint16_t reserved; uint32_t buffer_sz; grant_ref_t gref_directory; + uint32_t period_sz; }; /* @@ -632,7 +691,7 @@ struct xensnd_page_directory { * +----------------+----------------+----------------+----------------+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| * +----------------+----------------+----------------+----------------+ - * | reserved | 32 + * | reserved | 64 * +----------------+----------------+----------------+----------------+ * * Request read/write - used for read (for capture) or write (for playback): @@ -650,7 +709,7 @@ struct xensnd_page_directory { * +----------------+----------------+----------------+----------------+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| * +----------------+----------------+----------------+----------------+ - * | reserved | 32 + * | reserved | 64 * +----------------+----------------+----------------+----------------+ * * operation - XENSND_OP_READ for read or XENSND_OP_WRITE for write @@ -673,9 +732,11 @@ struct xensnd_rw_req { * +----------------+----------------+----------------+----------------+ * | length | 16 * +----------------+----------------+----------------+----------------+ + * | reserved | 20 + * +----------------+----------------+----------------+----------------+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| * +----------------+----------------+----------------+----------------+ - * | reserved | 32 + * | reserved | 64 * +----------------+----------------+----------------+----------------+ * * operation - XENSND_OP_SET_VOLUME for volume set @@ -713,9 +774,11 @@ struct xensnd_rw_req { * +----------------+----------------+----------------+----------------+ * | length | 16 * +----------------+----------------+----------------+----------------+ + * | reserved | 20 + * +----------------+----------------+----------------+----------------+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| * +----------------+----------------+----------------+----------------+ - * | reserved | 32 + * | reserved | 64 * +----------------+----------------+----------------+----------------+ * * operation - XENSND_OP_MUTE for mute or XENSND_OP_UNMUTE for unmute @@ -743,32 +806,213 @@ struct xensnd_rw_req { * * The 'struct xensnd_rw_req' is also used for XENSND_OP_SET_VOLUME, * XENSND_OP_GET_VOLUME, XENSND_OP_MUTE, XENSND_OP_UNMUTE. 
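/*
 * For illustration only (not part of this protocol): a minimal sketch of a
 * frontend filling an open request with the new period_sz field, using the
 * field names from the layout description above. The gref_dir grant and the
 * snd_front_*_sketch name are assumptions; ring submission is omitted.
 */
static void snd_front_fill_open_req_sketch(struct xensnd_req *req,
                                           grant_ref_t gref_dir)
{
        req->id = 0;                            /* echoed back in the response */
        req->operation = XENSND_OP_OPEN;

        req->op.open.pcm_rate = 48000;          /* Hz */
        req->op.open.pcm_channels = 2;
        req->op.open.buffer_sz = 65536;         /* octets */
        req->op.open.gref_directory = gref_dir;
        /*
         * Request a XENSND_EVT_CUR_POS event every 4096 octets of
         * played/captured data; 0 would disable position events.
         */
        req->op.open.period_sz = 4096;
}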
+ * + * Request stream running state change - trigger PCM stream running state + * to start, stop, pause or resume: + * + * 0 1 2 3 octet + * +----------------+----------------+----------------+----------------+ + * | id | _OP_TRIGGER | reserved | 4 + * +----------------+----------------+----------------+----------------+ + * | reserved | 8 + * +----------------+----------------+----------------+----------------+ + * | type | reserved | 12 + * +----------------+----------------+----------------+----------------+ + * | reserved | 16 + * +----------------+----------------+----------------+----------------+ + * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| + * +----------------+----------------+----------------+----------------+ + * | reserved | 64 + * +----------------+----------------+----------------+----------------+ + * + * type - uint8_t, XENSND_OP_TRIGGER_XXX value */ +struct xensnd_trigger_req { + uint8_t type; +}; + /* - *---------------------------------- Responses -------------------------------- + * Request stream parameter ranges: request intervals and + * masks of supported ranges for stream configuration values. * - * All response packets have the same length (32 octets) + * Sound device configuration for a particular stream is a limited subset + * of the multidimensional configuration available on XenStore, e.g. + * once the frame rate has been selected there is a limited supported range + * for sample rates becomes available (which might be the same set configured + * on XenStore or less). For example, selecting 96kHz sample rate may limit + * number of channels available for such configuration from 4 to 2, etc. + * Thus, each call to XENSND_OP_HW_PARAM_QUERY may reduce configuration + * space making it possible to iteratively get the final stream configuration, + * used in XENSND_OP_OPEN request. + * + * See response format for this request. 
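/*
 * For illustration only (not part of this protocol): a sketch of one
 * iteration of the query described above. The frontend proposes the
 * configuration space it is interested in and, on success, reads the
 * possibly narrowed intervals back from the response, feeding them into the
 * next query or into the final XENSND_OP_OPEN request. The
 * snd_front_*_sketch names are assumptions; ring handling is omitted, and
 * struct xensnd_query_hw_param and struct xensnd_resp are defined further
 * down in this header.
 */
static void snd_front_propose_hw_param_sketch(struct xensnd_req *req)
{
        req->id = 0;
        req->operation = XENSND_OP_HW_PARAM_QUERY;

        req->op.hw_param.formats = ~0ULL;       /* any XENSND_PCM_FORMAT_* */
        req->op.hw_param.rates.min = 8000;
        req->op.hw_param.rates.max = 96000;
        req->op.hw_param.channels.min = 1;
        req->op.hw_param.channels.max = 2;
        req->op.hw_param.buffer.min = 1;
        req->op.hw_param.buffer.max = 0xffffffff;       /* frames */
        req->op.hw_param.period.min = 1;
        req->op.hw_param.period.max = 0xffffffff;       /* frames */
}

static void snd_front_read_hw_param_sketch(const struct xensnd_resp *resp,
                                           struct xensnd_query_hw_param *hw)
{
        if (resp->status == 0)
                *hw = resp->resp.hw_param;
}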
* - * Response for all requests: * 0 1 2 3 octet * +----------------+----------------+----------------+----------------+ - * | id | operation | reserved | 4 + * | id | _HW_PARAM_QUERY| reserved | 4 * +----------------+----------------+----------------+----------------+ - * | status | 8 + * | reserved | 8 + * +----------------+----------------+----------------+----------------+ + * | formats mask low 32-bit | 12 + * +----------------+----------------+----------------+----------------+ + * | formats mask high 32-bit | 16 * +----------------+----------------+----------------+----------------+ - * | reserved | 12 + * | min rate | 20 + * +----------------+----------------+----------------+----------------+ + * | max rate | 24 + * +----------------+----------------+----------------+----------------+ + * | min channels | 28 + * +----------------+----------------+----------------+----------------+ + * | max channels | 32 + * +----------------+----------------+----------------+----------------+ + * | min buffer frames | 36 + * +----------------+----------------+----------------+----------------+ + * | max buffer frames | 40 + * +----------------+----------------+----------------+----------------+ + * | min period frames | 44 + * +----------------+----------------+----------------+----------------+ + * | max period frames | 48 + * +----------------+----------------+----------------+----------------+ + * | reserved | 52 * +----------------+----------------+----------------+----------------+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| * +----------------+----------------+----------------+----------------+ - * | reserved | 32 + * | reserved | 64 + * +----------------+----------------+----------------+----------------+ + * + * formats - uint64_t, bit mask representing values of the parameter + * made as bitwise OR of (1 << XENSND_PCM_FORMAT_XXX) values + * + * For interval parameters: + * min - uint32_t, minimum value of the parameter + * max - uint32_t, maximum value of the parameter + * + * Frame is defined as a product of the number of channels by the + * number of octets per one sample. 
+ */ + +struct xensnd_query_hw_param { + uint64_t formats; + struct { + uint32_t min; + uint32_t max; + } rates; + struct { + uint32_t min; + uint32_t max; + } channels; + struct { + uint32_t min; + uint32_t max; + } buffer; + struct { + uint32_t min; + uint32_t max; + } period; +}; + +/* + *---------------------------------- Responses -------------------------------- + * + * All response packets have the same length (64 octets) + * + * All response packets have common header: + * 0 1 2 3 octet + * +----------------+----------------+----------------+----------------+ + * | id | operation | reserved | 4 + * +----------------+----------------+----------------+----------------+ + * | status | 8 * +----------------+----------------+----------------+----------------+ * * id - uint16_t, copied from the request * operation - uint8_t, XENSND_OP_* - copied from request * status - int32_t, response status, zero on success and -XEN_EXX on failure + * + * + * HW parameter query response - response for XENSND_OP_HW_PARAM_QUERY: + * 0 1 2 3 octet + * +----------------+----------------+----------------+----------------+ + * | id | operation | reserved | 4 + * +----------------+----------------+----------------+----------------+ + * | status | 8 + * +----------------+----------------+----------------+----------------+ + * | formats mask low 32-bit | 12 + * +----------------+----------------+----------------+----------------+ + * | formats mask high 32-bit | 16 + * +----------------+----------------+----------------+----------------+ + * | min rate | 20 + * +----------------+----------------+----------------+----------------+ + * | max rate | 24 + * +----------------+----------------+----------------+----------------+ + * | min channels | 28 + * +----------------+----------------+----------------+----------------+ + * | max channels | 32 + * +----------------+----------------+----------------+----------------+ + * | min buffer frames | 36 + * +----------------+----------------+----------------+----------------+ + * | max buffer frames | 40 + * +----------------+----------------+----------------+----------------+ + * | min period frames | 44 + * +----------------+----------------+----------------+----------------+ + * | max period frames | 48 + * +----------------+----------------+----------------+----------------+ + * | reserved | 52 + * +----------------+----------------+----------------+----------------+ + * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| + * +----------------+----------------+----------------+----------------+ + * | reserved | 64 + * +----------------+----------------+----------------+----------------+ + * + * Meaning of the values in this response is the same as for + * XENSND_OP_HW_PARAM_QUERY request. 
+ */ + +/* + *----------------------------------- Events ---------------------------------- + * + * Events are sent via shared page allocated by the front and propagated by + * evt-event-channel/evt-ring-ref XenStore entries + * All event packets have the same length (64 octets) + * All event packets have common header: + * 0 1 2 3 octet + * +----------------+----------------+----------------+----------------+ + * | id | type | reserved | 4 + * +----------------+----------------+----------------+----------------+ + * | reserved | 8 + * +----------------+----------------+----------------+----------------+ + * + * id - uint16_t, event id, may be used by front + * type - uint8_t, type of the event + * + * + * Current stream position - event from back to front when stream's + * playback/capture position has advanced: + * 0 1 2 3 octet + * +----------------+----------------+----------------+----------------+ + * | id | _EVT_CUR_POS | reserved | 4 + * +----------------+----------------+----------------+----------------+ + * | reserved | 8 + * +----------------+----------------+----------------+----------------+ + * | position low 32-bit | 12 + * +----------------+----------------+----------------+----------------+ + * | position high 32-bit | 16 + * +----------------+----------------+----------------+----------------+ + * | reserved | 20 + * +----------------+----------------+----------------+----------------+ + * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| + * +----------------+----------------+----------------+----------------+ + * | reserved | 64 + * +----------------+----------------+----------------+----------------+ + * + * position - current value of stream's playback/capture position, octets + * */ +struct xensnd_cur_pos_evt { + uint64_t position; +}; + struct xensnd_req { uint16_t id; uint8_t operation; @@ -776,7 +1020,9 @@ struct xensnd_req { union { struct xensnd_open_req open; struct xensnd_rw_req rw; - uint8_t reserved[24]; + struct xensnd_trigger_req trigger; + struct xensnd_query_hw_param hw_param; + uint8_t reserved[56]; } op; }; @@ -785,9 +1031,53 @@ struct xensnd_resp { uint8_t operation; uint8_t reserved; int32_t status; - uint8_t reserved1[24]; + union { + struct xensnd_query_hw_param hw_param; + uint8_t reserved1[56]; + } resp; +}; + +struct xensnd_evt { + uint16_t id; + uint8_t type; + uint8_t reserved[5]; + union { + struct xensnd_cur_pos_evt cur_pos; + uint8_t reserved[56]; + } op; }; DEFINE_RING_TYPES(xen_sndif, struct xensnd_req, struct xensnd_resp); +/* + ****************************************************************************** + * Back to front events delivery + ****************************************************************************** + * In order to deliver asynchronous events from back to front a shared page is + * allocated by front and its granted reference propagated to back via + * XenStore entries (evt-ring-ref/evt-event-channel). + * This page has a common header used by both front and back to synchronize + * access and control event's ring buffer, while back being a producer of the + * events and front being a consumer. The rest of the page after the header + * is used for event packets. + * + * Upon reception of an event(s) front may confirm its reception + * for either each event, group of events or none. 
+ */ + +struct xensnd_event_page { + uint32_t in_cons; + uint32_t in_prod; + uint8_t reserved[56]; +}; + +#define XENSND_EVENT_PAGE_SIZE XEN_PAGE_SIZE +#define XENSND_IN_RING_OFFS (sizeof(struct xensnd_event_page)) +#define XENSND_IN_RING_SIZE (XENSND_EVENT_PAGE_SIZE - XENSND_IN_RING_OFFS) +#define XENSND_IN_RING_LEN (XENSND_IN_RING_SIZE / sizeof(struct xensnd_evt)) +#define XENSND_IN_RING(page) \ + ((struct xensnd_evt *)((char *)(page) + XENSND_IN_RING_OFFS)) +#define XENSND_IN_RING_REF(page, idx) \ + (XENSND_IN_RING((page))[(idx) % XENSND_IN_RING_LEN]) + #endif /* __XEN_PUBLIC_IO_SNDIF_H__ */ diff --git a/init/main.c b/init/main.c index b795aa341a3a..fd37315835b4 100644 --- a/init/main.c +++ b/init/main.c @@ -423,7 +423,7 @@ static noinline void __ref rest_init(void) /* * Enable might_sleep() and smp_processor_id() checks. - * They cannot be enabled earlier because with CONFIG_PRREMPT=y + * They cannot be enabled earlier because with CONFIG_PREEMPT=y * kernel_thread() would trigger might_sleep() splats. With * CONFIG_PREEMPT_VOLUNTARY=y the init task might have scheduled * already, but it's stuck on the kthreadd_done completion. @@ -1034,6 +1034,13 @@ __setup("rodata=", set_debug_rodata); static void mark_readonly(void) { if (rodata_enabled) { + /* + * load_module() results in W+X mappings, which are cleaned up + * with call_rcu_sched(). Let's make sure that queued work is + * flushed so that we don't hit false positives looking for + * insecure pages which are W+X. + */ + rcu_barrier_sched(); mark_rodata_ro(); rodata_test(); } else diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 14750e7c5ee4..027107f4be53 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -476,7 +476,7 @@ static u32 prog_fd_array_sys_lookup_elem(void *ptr) } /* decrement refcnt of all bpf_progs that are stored in this map */ -void bpf_fd_array_map_clear(struct bpf_map *map) +static void bpf_fd_array_map_clear(struct bpf_map *map) { struct bpf_array *array = container_of(map, struct bpf_array, map); int i; @@ -495,6 +495,7 @@ const struct bpf_map_ops prog_array_map_ops = { .map_fd_get_ptr = prog_fd_array_get_ptr, .map_fd_put_ptr = prog_fd_array_put_ptr, .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem, + .map_release_uref = bpf_fd_array_map_clear, }; static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file, diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index d315b393abdd..ba03ec39efb3 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -1572,13 +1572,32 @@ int bpf_prog_array_length(struct bpf_prog_array __rcu *progs) return cnt; } +static bool bpf_prog_array_copy_core(struct bpf_prog **prog, + u32 *prog_ids, + u32 request_cnt) +{ + int i = 0; + + for (; *prog; prog++) { + if (*prog == &dummy_bpf_prog.prog) + continue; + prog_ids[i] = (*prog)->aux->id; + if (++i == request_cnt) { + prog++; + break; + } + } + + return !!(*prog); +} + int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs, __u32 __user *prog_ids, u32 cnt) { struct bpf_prog **prog; unsigned long err = 0; - u32 i = 0, *ids; bool nospc; + u32 *ids; /* users of this function are doing: * cnt = bpf_prog_array_length(); @@ -1595,16 +1614,7 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs, return -ENOMEM; rcu_read_lock(); prog = rcu_dereference(progs)->progs; - for (; *prog; prog++) { - if (*prog == &dummy_bpf_prog.prog) - continue; - ids[i] = (*prog)->aux->id; - if (++i == cnt) { - prog++; - break; - } - } - nospc = !!(*prog); + nospc = 
bpf_prog_array_copy_core(prog, ids, cnt); rcu_read_unlock(); err = copy_to_user(prog_ids, ids, cnt * sizeof(u32)); kfree(ids); @@ -1683,22 +1693,25 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, } int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array, - __u32 __user *prog_ids, u32 request_cnt, - __u32 __user *prog_cnt) + u32 *prog_ids, u32 request_cnt, + u32 *prog_cnt) { + struct bpf_prog **prog; u32 cnt = 0; if (array) cnt = bpf_prog_array_length(array); - if (copy_to_user(prog_cnt, &cnt, sizeof(cnt))) - return -EFAULT; + *prog_cnt = cnt; /* return early if user requested only program count or nothing to copy */ if (!request_cnt || !cnt) return 0; - return bpf_prog_array_copy_to_user(array, prog_ids, request_cnt); + /* this function is called under trace/bpf_trace.c: bpf_event_mutex */ + prog = rcu_dereference_check(array, 1)->progs; + return bpf_prog_array_copy_core(prog, prog_ids, request_cnt) ? -ENOSPC + : 0; } static void bpf_prog_free_deferred(struct work_struct *work) diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 8dd9210d7db7..098eca568c2b 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -43,6 +43,7 @@ #include <net/tcp.h> #include <linux/ptr_ring.h> #include <net/inet_common.h> +#include <linux/sched/signal.h> #define SOCK_CREATE_FLAG_MASK \ (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY) @@ -325,6 +326,9 @@ retry: if (ret > 0) { if (apply) apply_bytes -= ret; + + sg->offset += ret; + sg->length -= ret; size -= ret; offset += ret; if (uncharge) @@ -332,8 +336,6 @@ retry: goto retry; } - sg->length = size; - sg->offset = offset; return ret; } @@ -391,7 +393,8 @@ static void return_mem_sg(struct sock *sk, int bytes, struct sk_msg_buff *md) } while (i != md->sg_end); } -static void free_bytes_sg(struct sock *sk, int bytes, struct sk_msg_buff *md) +static void free_bytes_sg(struct sock *sk, int bytes, + struct sk_msg_buff *md, bool charge) { struct scatterlist *sg = md->sg_data; int i = md->sg_start, free; @@ -401,11 +404,13 @@ static void free_bytes_sg(struct sock *sk, int bytes, struct sk_msg_buff *md) if (bytes < free) { sg[i].length -= bytes; sg[i].offset += bytes; - sk_mem_uncharge(sk, bytes); + if (charge) + sk_mem_uncharge(sk, bytes); break; } - sk_mem_uncharge(sk, sg[i].length); + if (charge) + sk_mem_uncharge(sk, sg[i].length); put_page(sg_page(&sg[i])); bytes -= sg[i].length; sg[i].length = 0; @@ -416,6 +421,7 @@ static void free_bytes_sg(struct sock *sk, int bytes, struct sk_msg_buff *md) if (i == MAX_SKB_FRAGS) i = 0; } + md->sg_start = i; } static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md) @@ -523,8 +529,6 @@ static int bpf_tcp_ingress(struct sock *sk, int apply_bytes, i = md->sg_start; do { - r->sg_data[i] = md->sg_data[i]; - size = (apply && apply_bytes < md->sg_data[i].length) ? 
apply_bytes : md->sg_data[i].length; @@ -535,6 +539,7 @@ static int bpf_tcp_ingress(struct sock *sk, int apply_bytes, } sk_mem_charge(sk, size); + r->sg_data[i] = md->sg_data[i]; r->sg_data[i].length = size; md->sg_data[i].length -= size; md->sg_data[i].offset += size; @@ -575,10 +580,10 @@ static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send, struct sk_msg_buff *md, int flags) { + bool ingress = !!(md->flags & BPF_F_INGRESS); struct smap_psock *psock; struct scatterlist *sg; - int i, err, free = 0; - bool ingress = !!(md->flags & BPF_F_INGRESS); + int err = 0; sg = md->sg_data; @@ -606,16 +611,8 @@ static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send, out_rcu: rcu_read_unlock(); out: - i = md->sg_start; - while (sg[i].length) { - free += sg[i].length; - put_page(sg_page(&sg[i])); - sg[i].length = 0; - i++; - if (i == MAX_SKB_FRAGS) - i = 0; - } - return free; + free_bytes_sg(NULL, send, md, false); + return err; } static inline void bpf_md_init(struct smap_psock *psock) @@ -700,19 +697,26 @@ more_data: err = bpf_tcp_sendmsg_do_redirect(redir, send, m, flags); lock_sock(sk); + if (unlikely(err < 0)) { + free_start_sg(sk, m); + psock->sg_size = 0; + if (!cork) + *copied -= send; + } else { + psock->sg_size -= send; + } + if (cork) { free_start_sg(sk, m); + psock->sg_size = 0; kfree(m); m = NULL; + err = 0; } - if (unlikely(err)) - *copied -= err; - else - psock->sg_size -= send; break; case __SK_DROP: default: - free_bytes_sg(sk, send, m); + free_bytes_sg(sk, send, m, true); apply_bytes_dec(psock, send); *copied -= send; psock->sg_size -= send; @@ -732,6 +736,26 @@ out_err: return err; } +static int bpf_wait_data(struct sock *sk, + struct smap_psock *psk, int flags, + long timeo, int *err) +{ + int rc; + + DEFINE_WAIT_FUNC(wait, woken_wake_function); + + add_wait_queue(sk_sleep(sk), &wait); + sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); + rc = sk_wait_event(sk, &timeo, + !list_empty(&psk->ingress) || + !skb_queue_empty(&sk->sk_receive_queue), + &wait); + sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); + remove_wait_queue(sk_sleep(sk), &wait); + + return rc; +} + static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, int flags, int *addr_len) { @@ -755,6 +779,7 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); lock_sock(sk); +bytes_ready: while (copied != len) { struct scatterlist *sg; struct sk_msg_buff *md; @@ -809,6 +834,28 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, } } + if (!copied) { + long timeo; + int data; + int err = 0; + + timeo = sock_rcvtimeo(sk, nonblock); + data = bpf_wait_data(sk, psock, flags, timeo, &err); + + if (data) { + if (!skb_queue_empty(&sk->sk_receive_queue)) { + release_sock(sk); + smap_release_sock(psock, sk); + copied = tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); + return copied; + } + goto bytes_ready; + } + + if (err) + copied = err; + } + release_sock(sk); smap_release_sock(psock, sk); return copied; @@ -1442,9 +1489,6 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr) attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK) return ERR_PTR(-EINVAL); - if (attr->value_size > KMALLOC_MAX_SIZE) - return ERR_PTR(-E2BIG); - err = bpf_tcp_ulp_register(); if (err && err != -EEXIST) return ERR_PTR(err); @@ -1834,7 +1878,7 @@ static int sock_map_update_elem(struct bpf_map *map, return err; } -static void sock_map_release(struct bpf_map *map, struct file *map_file) 
+static void sock_map_release(struct bpf_map *map) { struct bpf_stab *stab = container_of(map, struct bpf_stab, map); struct bpf_prog *orig; @@ -1858,7 +1902,7 @@ const struct bpf_map_ops sock_map_ops = { .map_get_next_key = sock_map_get_next_key, .map_update_elem = sock_map_update_elem, .map_delete_elem = sock_map_delete_elem, - .map_release = sock_map_release, + .map_release_uref = sock_map_release, }; BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock, diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 4ca46df19c9a..016ef9025827 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -26,6 +26,7 @@ #include <linux/cred.h> #include <linux/timekeeping.h> #include <linux/ctype.h> +#include <linux/nospec.h> #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \ (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \ @@ -102,12 +103,14 @@ const struct bpf_map_ops bpf_map_offload_ops = { static struct bpf_map *find_and_alloc_map(union bpf_attr *attr) { const struct bpf_map_ops *ops; + u32 type = attr->map_type; struct bpf_map *map; int err; - if (attr->map_type >= ARRAY_SIZE(bpf_map_types)) + if (type >= ARRAY_SIZE(bpf_map_types)) return ERR_PTR(-EINVAL); - ops = bpf_map_types[attr->map_type]; + type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types)); + ops = bpf_map_types[type]; if (!ops) return ERR_PTR(-EINVAL); @@ -122,7 +125,7 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr) if (IS_ERR(map)) return map; map->ops = ops; - map->map_type = attr->map_type; + map->map_type = type; return map; } @@ -257,8 +260,8 @@ static void bpf_map_free_deferred(struct work_struct *work) static void bpf_map_put_uref(struct bpf_map *map) { if (atomic_dec_and_test(&map->usercnt)) { - if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) - bpf_fd_array_map_clear(map); + if (map->ops->map_release_uref) + map->ops->map_release_uref(map); } } @@ -871,11 +874,17 @@ static const struct bpf_prog_ops * const bpf_prog_types[] = { static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog) { - if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type]) + const struct bpf_prog_ops *ops; + + if (type >= ARRAY_SIZE(bpf_prog_types)) + return -EINVAL; + type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types)); + ops = bpf_prog_types[type]; + if (!ops) return -EINVAL; if (!bpf_prog_is_dev_bound(prog->aux)) - prog->aux->ops = bpf_prog_types[type]; + prog->aux->ops = ops; else prog->aux->ops = &bpf_offload_prog_ops; prog->type = type; diff --git a/kernel/compat.c b/kernel/compat.c index 6d21894806b4..92d8c98c0f57 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -34,6 +34,7 @@ int compat_get_timex(struct timex *txc, const struct compat_timex __user *utp) { struct compat_timex tx32; + memset(txc, 0, sizeof(struct timex)); if (copy_from_user(&tx32, utp, sizeof(struct compat_timex))) return -EFAULT; diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c index 772a43fea825..c187aa3df3c8 100644 --- a/kernel/events/callchain.c +++ b/kernel/events/callchain.c @@ -119,23 +119,20 @@ int get_callchain_buffers(int event_max_stack) goto exit; } - if (count > 1) { - /* If the allocation failed, give up */ - if (!callchain_cpus_entries) - err = -ENOMEM; - /* - * If requesting per event more than the global cap, - * return a different error to help userspace figure - * this out. - * - * And also do it here so that we have &callchain_mutex held. 
- */ - if (event_max_stack > sysctl_perf_event_max_stack) - err = -EOVERFLOW; + /* + * If requesting per event more than the global cap, + * return a different error to help userspace figure + * this out. + * + * And also do it here so that we have &callchain_mutex held. + */ + if (event_max_stack > sysctl_perf_event_max_stack) { + err = -EOVERFLOW; goto exit; } - err = alloc_callchain_buffers(); + if (count == 1) + err = alloc_callchain_buffers(); exit: if (err) atomic_dec(&nr_callchain_events); diff --git a/kernel/events/core.c b/kernel/events/core.c index 2d5fe26551f8..67612ce359ad 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -7587,6 +7587,10 @@ static void perf_event_switch(struct task_struct *task, }, }; + if (!sched_in && task->state == TASK_RUNNING) + switch_event.event_id.header.misc |= + PERF_RECORD_MISC_SWITCH_OUT_PREEMPT; + perf_iterate_sb(perf_event_switch_output, &switch_event, NULL); @@ -10205,9 +10209,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr, * __u16 sample size limit. */ if (attr->sample_stack_user >= USHRT_MAX) - ret = -EINVAL; + return -EINVAL; else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64))) - ret = -EINVAL; + return -EINVAL; } if (!attr->sample_max_stack) diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 6c6b3c48db71..1d8ca9ea9979 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -14,6 +14,7 @@ #include <linux/slab.h> #include <linux/circ_buf.h> #include <linux/poll.h> +#include <linux/nospec.h> #include "internal.h" @@ -867,8 +868,10 @@ perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff) return NULL; /* AUX space */ - if (pgoff >= rb->aux_pgoff) - return virt_to_page(rb->aux_pages[pgoff - rb->aux_pgoff]); + if (pgoff >= rb->aux_pgoff) { + int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages); + return virt_to_page(rb->aux_pages[aux_pgoff]); + } } return __perf_mmap_to_page(rb, pgoff); diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index ce6848e46e94..1725b902983f 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -491,7 +491,7 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset) if (!uprobe) return NULL; - uprobe->inode = igrab(inode); + uprobe->inode = inode; uprobe->offset = offset; init_rwsem(&uprobe->register_rwsem); init_rwsem(&uprobe->consumer_rwsem); @@ -502,7 +502,6 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset) if (cur_uprobe) { kfree(uprobe); uprobe = cur_uprobe; - iput(inode); } return uprobe; @@ -701,7 +700,6 @@ static void delete_uprobe(struct uprobe *uprobe) rb_erase(&uprobe->rb_node, &uprobes_tree); spin_unlock(&uprobes_treelock); RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */ - iput(uprobe->inode); put_uprobe(uprobe); } @@ -873,7 +871,8 @@ static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *u * tuple). Creation refcount stops uprobe_unregister from freeing the * @uprobe even before the register operation is complete. Creation * refcount is released when the last @uc for the @uprobe - * unregisters. + * unregisters. Caller of uprobe_register() is required to keep @inode + * (and the containing mount) referenced. 
* * Return errno if it cannot successully install probes * else return 0 (success) diff --git a/kernel/fork.c b/kernel/fork.c index 242c8c93d285..a5d21c42acfc 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -216,10 +216,9 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node) if (!s) continue; -#ifdef CONFIG_DEBUG_KMEMLEAK /* Clear stale pointers from reused stack. */ memset(s->addr, 0, THREAD_SIZE); -#endif + tsk->stack_vm_area = s; return s->addr; } diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 102160ff5c66..ea619021d901 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -2428,7 +2428,7 @@ static int kprobe_blacklist_seq_show(struct seq_file *m, void *v) struct kprobe_blacklist_entry *ent = list_entry(v, struct kprobe_blacklist_entry, list); - seq_printf(m, "0x%p-0x%p\t%ps\n", (void *)ent->start_addr, + seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr, (void *)ent->end_addr, (void *)ent->start_addr); return 0; } diff --git a/kernel/kthread.c b/kernel/kthread.c index cd50e99202b0..2017a39ab490 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -55,7 +55,6 @@ enum KTHREAD_BITS { KTHREAD_IS_PER_CPU = 0, KTHREAD_SHOULD_STOP, KTHREAD_SHOULD_PARK, - KTHREAD_IS_PARKED, }; static inline void set_kthread_struct(void *kthread) @@ -177,14 +176,12 @@ void *kthread_probe_data(struct task_struct *task) static void __kthread_parkme(struct kthread *self) { - __set_current_state(TASK_PARKED); - while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) { - if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags)) - complete(&self->parked); + for (;;) { + set_current_state(TASK_PARKED); + if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags)) + break; schedule(); - __set_current_state(TASK_PARKED); } - clear_bit(KTHREAD_IS_PARKED, &self->flags); __set_current_state(TASK_RUNNING); } @@ -194,6 +191,11 @@ void kthread_parkme(void) } EXPORT_SYMBOL_GPL(kthread_parkme); +void kthread_park_complete(struct task_struct *k) +{ + complete(&to_kthread(k)->parked); +} + static int kthread(void *_create) { /* Copy data: it's on kthread's stack */ @@ -450,22 +452,15 @@ void kthread_unpark(struct task_struct *k) { struct kthread *kthread = to_kthread(k); - clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); /* - * We clear the IS_PARKED bit here as we don't wait - * until the task has left the park code. So if we'd - * park before that happens we'd see the IS_PARKED bit - * which might be about to be cleared. + * Newly created kthread was parked when the CPU was offline. + * The binding was lost and we need to set it again. */ - if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) { - /* - * Newly created kthread was parked when the CPU was offline. - * The binding was lost and we need to set it again. 
- */ - if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags)) - __kthread_bind(k, kthread->cpu, TASK_PARKED); - wake_up_state(k, TASK_PARKED); - } + if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags)) + __kthread_bind(k, kthread->cpu, TASK_PARKED); + + clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); + wake_up_state(k, TASK_PARKED); } EXPORT_SYMBOL_GPL(kthread_unpark); @@ -488,12 +483,13 @@ int kthread_park(struct task_struct *k) if (WARN_ON(k->flags & PF_EXITING)) return -ENOSYS; - if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) { - set_bit(KTHREAD_SHOULD_PARK, &kthread->flags); - if (k != current) { - wake_up_process(k); - wait_for_completion(&kthread->parked); - } + if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags))) + return -EBUSY; + + set_bit(KTHREAD_SHOULD_PARK, &kthread->flags); + if (k != current) { + wake_up_process(k); + wait_for_completion(&kthread->parked); } return 0; diff --git a/kernel/livepatch/shadow.c b/kernel/livepatch/shadow.c index fdac27588d60..83958c814439 100644 --- a/kernel/livepatch/shadow.c +++ b/kernel/livepatch/shadow.c @@ -113,8 +113,10 @@ void *klp_shadow_get(void *obj, unsigned long id) } EXPORT_SYMBOL_GPL(klp_shadow_get); -static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data, - size_t size, gfp_t gfp_flags, bool warn_on_exist) +static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, + size_t size, gfp_t gfp_flags, + klp_shadow_ctor_t ctor, void *ctor_data, + bool warn_on_exist) { struct klp_shadow *new_shadow; void *shadow_data; @@ -125,18 +127,15 @@ static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data, if (shadow_data) goto exists; - /* Allocate a new shadow variable for use inside the lock below */ + /* + * Allocate a new shadow variable. Fill it with zeroes by default. + * More complex setting can be done by @ctor function. But it is + * called only when the buffer is really used (under klp_shadow_lock). + */ new_shadow = kzalloc(size + sizeof(*new_shadow), gfp_flags); if (!new_shadow) return NULL; - new_shadow->obj = obj; - new_shadow->id = id; - - /* Initialize the shadow variable if data provided */ - if (data) - memcpy(new_shadow->data, data, size); - /* Look for <obj, id> again under the lock */ spin_lock_irqsave(&klp_shadow_lock, flags); shadow_data = klp_shadow_get(obj, id); @@ -150,6 +149,22 @@ static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data, goto exists; } + new_shadow->obj = obj; + new_shadow->id = id; + + if (ctor) { + int err; + + err = ctor(obj, new_shadow->data, ctor_data); + if (err) { + spin_unlock_irqrestore(&klp_shadow_lock, flags); + kfree(new_shadow); + pr_err("Failed to construct shadow variable <%p, %lx> (%d)\n", + obj, id, err); + return NULL; + } + } + /* No <obj, id> found, so attach the newly allocated one */ hash_add_rcu(klp_shadow_hash, &new_shadow->node, (unsigned long)new_shadow->obj); @@ -170,26 +185,32 @@ exists: * klp_shadow_alloc() - allocate and add a new shadow variable * @obj: pointer to parent object * @id: data identifier - * @data: pointer to data to attach to parent * @size: size of attached data * @gfp_flags: GFP mask for allocation + * @ctor: custom constructor to initialize the shadow data (optional) + * @ctor_data: pointer to any data needed by @ctor (optional) + * + * Allocates @size bytes for new shadow variable data using @gfp_flags. + * The data are zeroed by default. They are further initialized by @ctor + * function if it is not NULL. 
The new shadow variable is then added + * to the global hashtable. * - * Allocates @size bytes for new shadow variable data using @gfp_flags - * and copies @size bytes from @data into the new shadow variable's own - * data space. If @data is NULL, @size bytes are still allocated, but - * no copy is performed. The new shadow variable is then added to the - * global hashtable. + * If an existing <obj, id> shadow variable can be found, this routine will + * issue a WARN, exit early and return NULL. * - * If an existing <obj, id> shadow variable can be found, this routine - * will issue a WARN, exit early and return NULL. + * This function guarantees that the constructor function is called only when + * the variable did not exist before. The cost is that @ctor is called + * in atomic context under a spin lock. * * Return: the shadow variable data element, NULL on duplicate or * failure. */ -void *klp_shadow_alloc(void *obj, unsigned long id, void *data, - size_t size, gfp_t gfp_flags) +void *klp_shadow_alloc(void *obj, unsigned long id, + size_t size, gfp_t gfp_flags, + klp_shadow_ctor_t ctor, void *ctor_data) { - return __klp_shadow_get_or_alloc(obj, id, data, size, gfp_flags, true); + return __klp_shadow_get_or_alloc(obj, id, size, gfp_flags, + ctor, ctor_data, true); } EXPORT_SYMBOL_GPL(klp_shadow_alloc); @@ -197,37 +218,51 @@ EXPORT_SYMBOL_GPL(klp_shadow_alloc); * klp_shadow_get_or_alloc() - get existing or allocate a new shadow variable * @obj: pointer to parent object * @id: data identifier - * @data: pointer to data to attach to parent * @size: size of attached data * @gfp_flags: GFP mask for allocation + * @ctor: custom constructor to initialize the shadow data (optional) + * @ctor_data: pointer to any data needed by @ctor (optional) * * Returns a pointer to existing shadow data if an <obj, id> shadow * variable is already present. Otherwise, it creates a new shadow * variable like klp_shadow_alloc(). * - * This function guarantees that only one shadow variable exists with - * the given @id for the given @obj. It also guarantees that the shadow - * variable will be initialized by the given @data only when it did not - * exist before. + * This function guarantees that only one shadow variable exists with the given + * @id for the given @obj. It also guarantees that the constructor function + * will be called only when the variable did not exist before. The cost is + * that @ctor is called in atomic context under a spin lock. * * Return: the shadow variable data element, NULL on failure. 
*/ -void *klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data, - size_t size, gfp_t gfp_flags) +void *klp_shadow_get_or_alloc(void *obj, unsigned long id, + size_t size, gfp_t gfp_flags, + klp_shadow_ctor_t ctor, void *ctor_data) { - return __klp_shadow_get_or_alloc(obj, id, data, size, gfp_flags, false); + return __klp_shadow_get_or_alloc(obj, id, size, gfp_flags, + ctor, ctor_data, false); } EXPORT_SYMBOL_GPL(klp_shadow_get_or_alloc); +static void klp_shadow_free_struct(struct klp_shadow *shadow, + klp_shadow_dtor_t dtor) +{ + hash_del_rcu(&shadow->node); + if (dtor) + dtor(shadow->obj, shadow->data); + kfree_rcu(shadow, rcu_head); +} + /** * klp_shadow_free() - detach and free a <obj, id> shadow variable * @obj: pointer to parent object * @id: data identifier + * @dtor: custom callback that can be used to unregister the variable + * and/or free data that the shadow variable points to (optional) * * This function releases the memory for this <obj, id> shadow variable * instance, callers should stop referencing it accordingly. */ -void klp_shadow_free(void *obj, unsigned long id) +void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor) { struct klp_shadow *shadow; unsigned long flags; @@ -239,8 +274,7 @@ void klp_shadow_free(void *obj, unsigned long id) (unsigned long)obj) { if (klp_shadow_match(shadow, obj, id)) { - hash_del_rcu(&shadow->node); - kfree_rcu(shadow, rcu_head); + klp_shadow_free_struct(shadow, dtor); break; } } @@ -252,11 +286,13 @@ EXPORT_SYMBOL_GPL(klp_shadow_free); /** * klp_shadow_free_all() - detach and free all <*, id> shadow variables * @id: data identifier + * @dtor: custom callback that can be used to unregister the variable + * and/or free data that the shadow variable points to (optional) * * This function releases the memory for all <*, id> shadow variable * instances, callers should stop referencing them accordingly. */ -void klp_shadow_free_all(unsigned long id) +void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor) { struct klp_shadow *shadow; unsigned long flags; @@ -266,10 +302,8 @@ void klp_shadow_free_all(unsigned long id) /* Delete all <*, id> from hash */ hash_for_each(klp_shadow_hash, i, shadow, node) { - if (klp_shadow_match(shadow, shadow->obj, id)) { - hash_del_rcu(&shadow->node); - kfree_rcu(shadow, rcu_head); - } + if (klp_shadow_match(shadow, shadow->obj, id)) + klp_shadow_free_struct(shadow, dtor); } spin_unlock_irqrestore(&klp_shadow_lock, flags); diff --git a/kernel/module.c b/kernel/module.c index a6e43a5806a1..c9bea7f2b43e 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -1472,7 +1472,8 @@ static ssize_t module_sect_show(struct module_attribute *mattr, { struct module_sect_attr *sattr = container_of(mattr, struct module_sect_attr, mattr); - return sprintf(buf, "0x%pK\n", (void *)sattr->address); + return sprintf(buf, "0x%px\n", kptr_restrict < 2 ? + (void *)sattr->address : NULL); } static void free_sect_attrs(struct module_sect_attrs *sect_attrs) @@ -3516,6 +3517,11 @@ static noinline int do_init_module(struct module *mod) * walking this with preempt disabled. In all the failure paths, we * call synchronize_sched(), but we don't want to slow down the success * path, so use actual RCU here. + * Note that module_alloc() on most architectures creates W+X page + * mappings which won't be cleaned up until do_free_init() runs. 
Any + * code such as mark_rodata_ro() which depends on those mappings to + * be cleaned up needs to sync with the queued work - ie + * rcu_barrier_sched() */ call_rcu_sched(&freeinit->rcu, do_free_init); mutex_unlock(&module_mutex); diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c index 6be6c575b6cd..2d4ff5353ded 100644 --- a/kernel/sched/autogroup.c +++ b/kernel/sched/autogroup.c @@ -2,6 +2,7 @@ /* * Auto-group scheduling implementation: */ +#include <linux/nospec.h> #include "sched.h" unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1; @@ -209,7 +210,7 @@ int proc_sched_autogroup_set_nice(struct task_struct *p, int nice) static unsigned long next = INITIAL_JIFFIES; struct autogroup *ag; unsigned long shares; - int err; + int err, idx; if (nice < MIN_NICE || nice > MAX_NICE) return -EINVAL; @@ -227,7 +228,9 @@ int proc_sched_autogroup_set_nice(struct task_struct *p, int nice) next = HZ / 10 + jiffies; ag = autogroup_task_get(p); - shares = scale_load(sched_prio_to_weight[nice + 20]); + + idx = array_index_nospec(nice + 20, 40); + shares = scale_load(sched_prio_to_weight[idx]); down_write(&ag->lock); err = sched_group_set_shares(ag->tg, shares); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 5e10aaeebfcc..092f7c4de903 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7,6 +7,9 @@ */ #include "sched.h" +#include <linux/kthread.h> +#include <linux/nospec.h> + #include <asm/switch_to.h> #include <asm/tlb.h> @@ -2718,20 +2721,28 @@ static struct rq *finish_task_switch(struct task_struct *prev) membarrier_mm_sync_core_before_usermode(mm); mmdrop(mm); } - if (unlikely(prev_state == TASK_DEAD)) { - if (prev->sched_class->task_dead) - prev->sched_class->task_dead(prev); + if (unlikely(prev_state & (TASK_DEAD|TASK_PARKED))) { + switch (prev_state) { + case TASK_DEAD: + if (prev->sched_class->task_dead) + prev->sched_class->task_dead(prev); - /* - * Remove function-return probe instances associated with this - * task and put them back on the free list. - */ - kprobe_flush_task(prev); + /* + * Remove function-return probe instances associated with this + * task and put them back on the free list. + */ + kprobe_flush_task(prev); + + /* Task is done with its stack. */ + put_task_stack(prev); - /* Task is done with its stack. */ - put_task_stack(prev); + put_task_struct(prev); + break; - put_task_struct(prev); + case TASK_PARKED: + kthread_park_complete(prev); + break; + } } tick_nohz_task_switch(); @@ -3498,23 +3509,8 @@ static void __sched notrace __schedule(bool preempt) void __noreturn do_task_dead(void) { - /* - * The setting of TASK_RUNNING by try_to_wake_up() may be delayed - * when the following two conditions become true. - * - There is race condition of mmap_sem (It is acquired by - * exit_mm()), and - * - SMI occurs before setting TASK_RUNINNG. 
- * (or hypervisor of virtual machine switches to other guest) - * As a result, we may become TASK_RUNNING after becoming TASK_DEAD - * - * To avoid it, we have to wait for releasing tsk->pi_lock which - * is held by try_to_wake_up() - */ - raw_spin_lock_irq(¤t->pi_lock); - raw_spin_unlock_irq(¤t->pi_lock); - /* Causes final put_task_struct in finish_task_switch(): */ - __set_current_state(TASK_DEAD); + set_special_state(TASK_DEAD); /* Tell freezer to ignore us: */ current->flags |= PF_NOFREEZE; @@ -6928,11 +6924,15 @@ static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css, struct cftype *cft, s64 nice) { unsigned long weight; + int idx; if (nice < MIN_NICE || nice > MAX_NICE) return -ERANGE; - weight = sched_prio_to_weight[NICE_TO_PRIO(nice) - MAX_RT_PRIO]; + idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO; + idx = array_index_nospec(idx, 40); + weight = sched_prio_to_weight[idx]; + return sched_group_set_shares(css_tg(css), scale_load(weight)); } #endif diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index d2c6083304b4..e13df951aca7 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -305,7 +305,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time, * Do not reduce the frequency if the CPU has not been idle * recently, as the reduction is likely to be premature then. */ - if (busy && next_f < sg_policy->next_freq) { + if (busy && next_f < sg_policy->next_freq && + sg_policy->next_freq != UINT_MAX) { next_f = sg_policy->next_freq; /* Reset cached freq as next_freq has changed */ @@ -396,19 +397,6 @@ static void sugov_irq_work(struct irq_work *irq_work) sg_policy = container_of(irq_work, struct sugov_policy, irq_work); - /* - * For RT tasks, the schedutil governor shoots the frequency to maximum. - * Special care must be taken to ensure that this kthread doesn't result - * in the same behavior. - * - * This is (mostly) guaranteed by the work_in_progress flag. The flag is - * updated only at the end of the sugov_work() function and before that - * the schedutil governor rejects all other frequency scaling requests. - * - * There is a very rare case though, where the RT thread yields right - * after the work_in_progress flag is cleared. The effects of that are - * neglected for now. - */ kthread_queue_work(&sg_policy->worker, &sg_policy->work); } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 54dc31e7ab9b..79f574dba096 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1854,7 +1854,6 @@ static int task_numa_migrate(struct task_struct *p) static void numa_migrate_preferred(struct task_struct *p) { unsigned long interval = HZ; - unsigned long numa_migrate_retry; /* This task has no NUMA fault statistics yet */ if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults)) @@ -1862,18 +1861,7 @@ static void numa_migrate_preferred(struct task_struct *p) /* Periodically retry migrating the task to the preferred node */ interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16); - numa_migrate_retry = jiffies + interval; - - /* - * Check that the new retry threshold is after the current one. If - * the retry is in the future, it implies that wake_affine has - * temporarily asked NUMA balancing to backoff from placement. 
- */ - if (numa_migrate_retry > p->numa_migrate_retry) - return; - - /* Safe to try placing the task on the preferred node */ - p->numa_migrate_retry = numa_migrate_retry; + p->numa_migrate_retry = jiffies + interval; /* Success if task is already running on preferred CPU */ if (task_node(p) == p->numa_preferred_nid) @@ -5922,48 +5910,6 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p, return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits; } -#ifdef CONFIG_NUMA_BALANCING -static void -update_wa_numa_placement(struct task_struct *p, int prev_cpu, int target) -{ - unsigned long interval; - - if (!static_branch_likely(&sched_numa_balancing)) - return; - - /* If balancing has no preference then continue gathering data */ - if (p->numa_preferred_nid == -1) - return; - - /* - * If the wakeup is not affecting locality then it is neutral from - * the perspective of NUMA balacing so continue gathering data. - */ - if (cpu_to_node(prev_cpu) == cpu_to_node(target)) - return; - - /* - * Temporarily prevent NUMA balancing trying to place waker/wakee after - * wakee has been moved by wake_affine. This will potentially allow - * related tasks to converge and update their data placement. The - * 4 * numa_scan_period is to allow the two-pass filter to migrate - * hot data to the wakers node. - */ - interval = max(sysctl_numa_balancing_scan_delay, - p->numa_scan_period << 2); - p->numa_migrate_retry = jiffies + msecs_to_jiffies(interval); - - interval = max(sysctl_numa_balancing_scan_delay, - current->numa_scan_period << 2); - current->numa_migrate_retry = jiffies + msecs_to_jiffies(interval); -} -#else -static void -update_wa_numa_placement(struct task_struct *p, int prev_cpu, int target) -{ -} -#endif - static int wake_affine(struct sched_domain *sd, struct task_struct *p, int this_cpu, int prev_cpu, int sync) { @@ -5979,7 +5925,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, if (target == nr_cpumask_bits) return prev_cpu; - update_wa_numa_placement(p, prev_cpu, target); schedstat_inc(sd->ttwu_move_affine); schedstat_inc(p->se.statistics.nr_wakeups_affine); return target; @@ -9847,6 +9792,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf) if (curr_cost > this_rq->max_idle_balance_cost) this_rq->max_idle_balance_cost = curr_cost; +out: /* * While browsing the domains, we released the rq lock, a task could * have been enqueued in the meantime. Since we're not going idle, @@ -9855,7 +9801,6 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf) if (this_rq->cfs.h_nr_running && !pulled_task) pulled_task = 1; -out: /* Move the next balance forward */ if (time_after(this_rq->next_balance, next_balance)) this_rq->next_balance = next_balance; diff --git a/kernel/signal.c b/kernel/signal.c index d4ccea599692..9c33163a6165 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1961,14 +1961,27 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) return; } + set_special_state(TASK_TRACED); + /* * We're committing to trapping. TRACED should be visible before * TRAPPING is cleared; otherwise, the tracer might fail do_wait(). * Also, transition to TRACED and updates to ->jobctl should be * atomic with respect to siglock and should be done after the arch * hook as siglock is released and regrabbed across it. 
+ * + * TRACER TRACEE + * + * ptrace_attach() + * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED) + * do_wait() + * set_current_state() smp_wmb(); + * ptrace_do_wait() + * wait_task_stopped() + * task_stopped_code() + * [L] task_is_traced() [S] task_clear_jobctl_trapping(); */ - set_current_state(TASK_TRACED); + smp_wmb(); current->last_siginfo = info; current->exit_code = exit_code; @@ -2176,7 +2189,7 @@ static bool do_signal_stop(int signr) if (task_participate_group_stop(current)) notify = CLD_STOPPED; - __set_current_state(TASK_STOPPED); + set_special_state(TASK_STOPPED); spin_unlock_irq(¤t->sighand->siglock); /* diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index b7591261652d..64c0291b579c 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -21,6 +21,7 @@ #include <linux/smpboot.h> #include <linux/atomic.h> #include <linux/nmi.h> +#include <linux/sched/wake_q.h> /* * Structure to determine completion condition and record errors. May @@ -65,27 +66,31 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done) } static void __cpu_stop_queue_work(struct cpu_stopper *stopper, - struct cpu_stop_work *work) + struct cpu_stop_work *work, + struct wake_q_head *wakeq) { list_add_tail(&work->list, &stopper->works); - wake_up_process(stopper->thread); + wake_q_add(wakeq, stopper->thread); } /* queue @work to @stopper. if offline, @work is completed immediately */ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) { struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); + DEFINE_WAKE_Q(wakeq); unsigned long flags; bool enabled; spin_lock_irqsave(&stopper->lock, flags); enabled = stopper->enabled; if (enabled) - __cpu_stop_queue_work(stopper, work); + __cpu_stop_queue_work(stopper, work, &wakeq); else if (work->done) cpu_stop_signal_done(work->done); spin_unlock_irqrestore(&stopper->lock, flags); + wake_up_q(&wakeq); + return enabled; } @@ -229,6 +234,7 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1, { struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1); struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2); + DEFINE_WAKE_Q(wakeq); int err; retry: spin_lock_irq(&stopper1->lock); @@ -252,8 +258,8 @@ retry: goto unlock; err = 0; - __cpu_stop_queue_work(stopper1, work1); - __cpu_stop_queue_work(stopper2, work2); + __cpu_stop_queue_work(stopper1, work1, &wakeq); + __cpu_stop_queue_work(stopper2, work2, &wakeq); unlock: spin_unlock(&stopper2->lock); spin_unlock_irq(&stopper1->lock); @@ -263,6 +269,9 @@ unlock: cpu_relax(); goto retry; } + + wake_up_q(&wakeq); + return err; } /** diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c index e8c0dab4fd65..07148b497451 100644 --- a/kernel/sysctl_binary.c +++ b/kernel/sysctl_binary.c @@ -704,24 +704,6 @@ static const struct bin_table bin_net_netfilter_table[] = { {} }; -static const struct bin_table bin_net_irda_table[] = { - { CTL_INT, NET_IRDA_DISCOVERY, "discovery" }, - { CTL_STR, NET_IRDA_DEVNAME, "devname" }, - { CTL_INT, NET_IRDA_DEBUG, "debug" }, - { CTL_INT, NET_IRDA_FAST_POLL, "fast_poll_increase" }, - { CTL_INT, NET_IRDA_DISCOVERY_SLOTS, "discovery_slots" }, - { CTL_INT, NET_IRDA_DISCOVERY_TIMEOUT, "discovery_timeout" }, - { CTL_INT, NET_IRDA_SLOT_TIMEOUT, "slot_timeout" }, - { CTL_INT, NET_IRDA_MAX_BAUD_RATE, "max_baud_rate" }, - { CTL_INT, NET_IRDA_MIN_TX_TURN_TIME, "min_tx_turn_time" }, - { CTL_INT, NET_IRDA_MAX_TX_DATA_SIZE, "max_tx_data_size" }, - { CTL_INT, NET_IRDA_MAX_TX_WINDOW, "max_tx_window" }, - { CTL_INT, 
NET_IRDA_MAX_NOREPLY_TIME, "max_noreply_time" }, - { CTL_INT, NET_IRDA_WARN_NOREPLY_TIME, "warn_noreply_time" }, - { CTL_INT, NET_IRDA_LAP_KEEPALIVE_TIME, "lap_keepalive_time" }, - {} -}; - static const struct bin_table bin_net_table[] = { { CTL_DIR, NET_CORE, "core", bin_net_core_table }, /* NET_ETHER not used */ @@ -743,7 +725,7 @@ static const struct bin_table bin_net_table[] = { { CTL_DIR, NET_LLC, "llc", bin_net_llc_table }, { CTL_DIR, NET_NETFILTER, "netfilter", bin_net_netfilter_table }, /* NET_DCCP "dccp" no longer used */ - { CTL_DIR, NET_IRDA, "irda", bin_net_irda_table }, + /* NET_IRDA "irda" no longer used */ { CTL_INT, 2089, "nf_conntrack_max" }, {} }; diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 0e974cface0b..84f37420fcf5 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -119,6 +119,16 @@ static DEFINE_SPINLOCK(watchdog_lock); static int watchdog_running; static atomic_t watchdog_reset_pending; +static void inline clocksource_watchdog_lock(unsigned long *flags) +{ + spin_lock_irqsave(&watchdog_lock, *flags); +} + +static void inline clocksource_watchdog_unlock(unsigned long *flags) +{ + spin_unlock_irqrestore(&watchdog_lock, *flags); +} + static int clocksource_watchdog_kthread(void *data); static void __clocksource_change_rating(struct clocksource *cs, int rating); @@ -142,9 +152,19 @@ static void __clocksource_unstable(struct clocksource *cs) cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG); cs->flags |= CLOCK_SOURCE_UNSTABLE; + /* + * If the clocksource is registered clocksource_watchdog_kthread() will + * re-rate and re-select. + */ + if (list_empty(&cs->list)) { + cs->rating = 0; + return; + } + if (cs->mark_unstable) cs->mark_unstable(cs); + /* kick clocksource_watchdog_kthread() */ if (finished_booting) schedule_work(&watchdog_work); } @@ -153,10 +173,8 @@ static void __clocksource_unstable(struct clocksource *cs) * clocksource_mark_unstable - mark clocksource unstable via watchdog * @cs: clocksource to be marked unstable * - * This function is called instead of clocksource_change_rating from - * cpu hotplug code to avoid a deadlock between the clocksource mutex - * and the cpu hotplug mutex. It defers the update of the clocksource - * to the watchdog thread. + * This function is called by the x86 TSC code to mark clocksources as unstable; + * it defers demotion and re-selection to a kthread. */ void clocksource_mark_unstable(struct clocksource *cs) { @@ -164,7 +182,7 @@ void clocksource_mark_unstable(struct clocksource *cs) spin_lock_irqsave(&watchdog_lock, flags); if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) { - if (list_empty(&cs->wd_list)) + if (!list_empty(&cs->list) && list_empty(&cs->wd_list)) list_add(&cs->wd_list, &watchdog_list); __clocksource_unstable(cs); } @@ -319,9 +337,8 @@ static void clocksource_resume_watchdog(void) static void clocksource_enqueue_watchdog(struct clocksource *cs) { - unsigned long flags; + INIT_LIST_HEAD(&cs->wd_list); - spin_lock_irqsave(&watchdog_lock, flags); if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) { /* cs is a clocksource to be watched. 
*/ list_add(&cs->wd_list, &watchdog_list); @@ -331,7 +348,6 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs) if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; } - spin_unlock_irqrestore(&watchdog_lock, flags); } static void clocksource_select_watchdog(bool fallback) @@ -373,9 +389,6 @@ static void clocksource_select_watchdog(bool fallback) static void clocksource_dequeue_watchdog(struct clocksource *cs) { - unsigned long flags; - - spin_lock_irqsave(&watchdog_lock, flags); if (cs != watchdog) { if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) { /* cs is a watched clocksource. */ @@ -384,21 +397,19 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs) clocksource_stop_watchdog(); } } - spin_unlock_irqrestore(&watchdog_lock, flags); } static int __clocksource_watchdog_kthread(void) { struct clocksource *cs, *tmp; unsigned long flags; - LIST_HEAD(unstable); int select = 0; spin_lock_irqsave(&watchdog_lock, flags); list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) { if (cs->flags & CLOCK_SOURCE_UNSTABLE) { list_del_init(&cs->wd_list); - list_add(&cs->wd_list, &unstable); + __clocksource_change_rating(cs, 0); select = 1; } if (cs->flags & CLOCK_SOURCE_RESELECT) { @@ -410,11 +421,6 @@ static int __clocksource_watchdog_kthread(void) clocksource_stop_watchdog(); spin_unlock_irqrestore(&watchdog_lock, flags); - /* Needs to be done outside of watchdog lock */ - list_for_each_entry_safe(cs, tmp, &unstable, wd_list) { - list_del_init(&cs->wd_list); - __clocksource_change_rating(cs, 0); - } return select; } @@ -447,6 +453,9 @@ static inline int __clocksource_watchdog_kthread(void) { return 0; } static bool clocksource_is_watchdog(struct clocksource *cs) { return false; } void clocksource_mark_unstable(struct clocksource *cs) { } +static void inline clocksource_watchdog_lock(unsigned long *flags) { } +static void inline clocksource_watchdog_unlock(unsigned long *flags) { } + #endif /* CONFIG_CLOCKSOURCE_WATCHDOG */ /** @@ -779,14 +788,19 @@ EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale); */ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) { + unsigned long flags; /* Initialize mult/shift and max_idle_ns */ __clocksource_update_freq_scale(cs, scale, freq); /* Add clocksource to the clocksource list */ mutex_lock(&clocksource_mutex); + + clocksource_watchdog_lock(&flags); clocksource_enqueue(cs); clocksource_enqueue_watchdog(cs); + clocksource_watchdog_unlock(&flags); + clocksource_select(); clocksource_select_watchdog(false); mutex_unlock(&clocksource_mutex); @@ -808,8 +822,13 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating) */ void clocksource_change_rating(struct clocksource *cs, int rating) { + unsigned long flags; + mutex_lock(&clocksource_mutex); + clocksource_watchdog_lock(&flags); __clocksource_change_rating(cs, rating); + clocksource_watchdog_unlock(&flags); + clocksource_select(); clocksource_select_watchdog(false); mutex_unlock(&clocksource_mutex); @@ -821,6 +840,8 @@ EXPORT_SYMBOL(clocksource_change_rating); */ static int clocksource_unbind(struct clocksource *cs) { + unsigned long flags; + if (clocksource_is_watchdog(cs)) { /* Select and try to install a replacement watchdog. 
*/ clocksource_select_watchdog(true); @@ -834,8 +855,12 @@ static int clocksource_unbind(struct clocksource *cs) if (curr_clocksource == cs) return -EBUSY; } + + clocksource_watchdog_lock(&flags); clocksource_dequeue_watchdog(cs); list_del_init(&cs->list); + clocksource_watchdog_unlock(&flags); + return 0; } diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index eda1210ce50f..14e858753d76 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -91,6 +91,11 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) = .get_time = &ktime_get_real, }, { + .index = HRTIMER_BASE_BOOTTIME, + .clockid = CLOCK_BOOTTIME, + .get_time = &ktime_get_boottime, + }, + { .index = HRTIMER_BASE_TAI, .clockid = CLOCK_TAI, .get_time = &ktime_get_clocktai, @@ -106,6 +111,11 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) = .get_time = &ktime_get_real, }, { + .index = HRTIMER_BASE_BOOTTIME_SOFT, + .clockid = CLOCK_BOOTTIME, + .get_time = &ktime_get_boottime, + }, + { .index = HRTIMER_BASE_TAI_SOFT, .clockid = CLOCK_TAI, .get_time = &ktime_get_clocktai, @@ -119,7 +129,7 @@ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = { [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME, [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC, - [CLOCK_BOOTTIME] = HRTIMER_BASE_MONOTONIC, + [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME, [CLOCK_TAI] = HRTIMER_BASE_TAI, }; @@ -571,12 +581,14 @@ __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base) { ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset; + ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset; ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset; ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq, - offs_real, offs_tai); + offs_real, offs_boot, offs_tai); base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real; + base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot; base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai; return now; diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 2541bd89f20e..5a6251ac6f7a 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -1205,10 +1205,12 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx, u64 *newval, u64 *oldval) { u64 now; + int ret; WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED); + ret = cpu_timer_sample_group(clock_idx, tsk, &now); - if (oldval && cpu_timer_sample_group(clock_idx, tsk, &now) != -EINVAL) { + if (oldval && ret != -EINVAL) { /* * We are setting itimer. 
The *oldval is absolute and we update * it to be relative, *newval argument is relative and we update diff --git a/kernel/time/posix-stubs.c b/kernel/time/posix-stubs.c index e0dbae98db9d..69a937c3cd81 100644 --- a/kernel/time/posix-stubs.c +++ b/kernel/time/posix-stubs.c @@ -83,8 +83,6 @@ int do_clock_gettime(clockid_t which_clock, struct timespec64 *tp) case CLOCK_BOOTTIME: get_monotonic_boottime64(tp); break; - case CLOCK_MONOTONIC_ACTIVE: - ktime_get_active_ts64(tp); default: return -EINVAL; } diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c index b6899b5060bd..10b7186d0638 100644 --- a/kernel/time/posix-timers.c +++ b/kernel/time/posix-timers.c @@ -252,16 +252,15 @@ static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 * return 0; } -static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp) +static int posix_get_boottime(const clockid_t which_clock, struct timespec64 *tp) { - timekeeping_clocktai64(tp); + get_monotonic_boottime64(tp); return 0; } -static int posix_get_monotonic_active(clockid_t which_clock, - struct timespec64 *tp) +static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp) { - ktime_get_active_ts64(tp); + timekeeping_clocktai64(tp); return 0; } @@ -1317,9 +1316,19 @@ static const struct k_clock clock_tai = { .timer_arm = common_hrtimer_arm, }; -static const struct k_clock clock_monotonic_active = { +static const struct k_clock clock_boottime = { .clock_getres = posix_get_hrtimer_res, - .clock_get = posix_get_monotonic_active, + .clock_get = posix_get_boottime, + .nsleep = common_nsleep, + .timer_create = common_timer_create, + .timer_set = common_timer_set, + .timer_get = common_timer_get, + .timer_del = common_timer_del, + .timer_rearm = common_hrtimer_rearm, + .timer_forward = common_hrtimer_forward, + .timer_remaining = common_hrtimer_remaining, + .timer_try_to_cancel = common_hrtimer_try_to_cancel, + .timer_arm = common_hrtimer_arm, }; static const struct k_clock * const posix_clocks[] = { @@ -1330,11 +1339,10 @@ static const struct k_clock * const posix_clocks[] = { [CLOCK_MONOTONIC_RAW] = &clock_monotonic_raw, [CLOCK_REALTIME_COARSE] = &clock_realtime_coarse, [CLOCK_MONOTONIC_COARSE] = &clock_monotonic_coarse, - [CLOCK_BOOTTIME] = &clock_monotonic, + [CLOCK_BOOTTIME] = &clock_boottime, [CLOCK_REALTIME_ALARM] = &alarm_clock, [CLOCK_BOOTTIME_ALARM] = &alarm_clock, [CLOCK_TAI] = &clock_tai, - [CLOCK_MONOTONIC_ACTIVE] = &clock_monotonic_active, }; static const struct k_clock *clockid_to_kclock(const clockid_t id) diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 099572ca4a8f..49edc1c4f3e6 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -419,19 +419,6 @@ void tick_suspend_local(void) clockevents_shutdown(td->evtdev); } -static void tick_forward_next_period(void) -{ - ktime_t delta, now = ktime_get(); - u64 n; - - delta = ktime_sub(now, tick_next_period); - n = ktime_divns(delta, tick_period); - tick_next_period += n * tick_period; - if (tick_next_period < now) - tick_next_period += tick_period; - tick_sched_forward_next_period(); -} - /** * tick_resume_local - Resume the local tick device * @@ -444,8 +431,6 @@ void tick_resume_local(void) struct tick_device *td = this_cpu_ptr(&tick_cpu_device); bool broadcast = tick_resume_check_broadcast(); - tick_forward_next_period(); - clockevents_tick_resume(td->evtdev); if (!broadcast) { if (td->mode == TICKDEV_MODE_PERIODIC) diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index 
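[Editorial sketch] The hrtimer and posix-timers hunks above give CLOCK_BOOTTIME its own clock base backed by get_monotonic_boottime64() instead of aliasing it to CLOCK_MONOTONIC, so the two clocks again diverge by the time spent in suspend. A small userspace check of that behaviour, using only standard clock_gettime() calls (not part of the patch; a reasonably recent libc is assumed for the CLOCK_BOOTTIME constant):

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec mono, boot;

	clock_gettime(CLOCK_MONOTONIC, &mono);
	clock_gettime(CLOCK_BOOTTIME, &boot);

	printf("CLOCK_MONOTONIC: %lld.%09ld\n",
	       (long long)mono.tv_sec, mono.tv_nsec);
	printf("CLOCK_BOOTTIME:  %lld.%09ld\n",
	       (long long)boot.tv_sec, boot.tv_nsec);
	/* After a suspend/resume cycle the difference should roughly
	 * equal the time the machine spent suspended. */
	printf("difference:      %lld s\n",
	       (long long)(boot.tv_sec - mono.tv_sec));
	return 0;
}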
21efab7485ca..e277284c2831 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h @@ -141,12 +141,6 @@ static inline void tick_check_oneshot_broadcast_this_cpu(void) { } static inline bool tick_broadcast_oneshot_available(void) { return tick_oneshot_possible(); } #endif /* !(BROADCAST && ONESHOT) */ -#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS) -extern void tick_sched_forward_next_period(void); -#else -static inline void tick_sched_forward_next_period(void) { } -#endif - /* NO_HZ_FULL internal */ #ifdef CONFIG_NO_HZ_FULL extern void tick_nohz_init(void); diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c index c1f518e7aa80..6fe615d57ebb 100644 --- a/kernel/time/tick-oneshot.c +++ b/kernel/time/tick-oneshot.c @@ -82,16 +82,15 @@ int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *)) if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT) || !tick_device_is_functional(dev)) { - printk(KERN_INFO "Clockevents: " - "could not switch to one-shot mode:"); + pr_info("Clockevents: could not switch to one-shot mode:"); if (!dev) { - printk(" no tick device\n"); + pr_cont(" no tick device\n"); } else { if (!tick_device_is_functional(dev)) - printk(" %s is not functional.\n", dev->name); + pr_cont(" %s is not functional.\n", dev->name); else - printk(" %s does not support one-shot mode.\n", - dev->name); + pr_cont(" %s does not support one-shot mode.\n", + dev->name); } return -EINVAL; } diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 646645e981f9..da9455a6b42b 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -52,15 +52,6 @@ struct tick_sched *tick_get_tick_sched(int cpu) static ktime_t last_jiffies_update; /* - * Called after resume. Make sure that jiffies are not fast forwarded due to - * clock monotonic being forwarded by the suspended time. - */ -void tick_sched_forward_next_period(void) -{ - last_jiffies_update = tick_next_period; -} - -/* * Must be called with interrupts disabled ! */ static void tick_do_update_jiffies64(ktime_t now) @@ -804,12 +795,12 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu) return; } - hrtimer_set_expires(&ts->sched_timer, tick); - - if (ts->nohz_mode == NOHZ_MODE_HIGHRES) - hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED); - else + if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { + hrtimer_start(&ts->sched_timer, tick, HRTIMER_MODE_ABS_PINNED); + } else { + hrtimer_set_expires(&ts->sched_timer, tick); tick_program_event(tick, 1); + } } static void tick_nohz_retain_tick(struct tick_sched *ts) diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index ca90219a1e73..49cbceef5deb 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -138,12 +138,7 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm) static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta) { - /* Update both bases so mono and raw stay coupled. */ - tk->tkr_mono.base += delta; - tk->tkr_raw.base += delta; - - /* Accumulate time spent in suspend */ - tk->time_suspended += delta; + tk->offs_boot = ktime_add(tk->offs_boot, delta); } /* @@ -473,6 +468,36 @@ u64 ktime_get_raw_fast_ns(void) } EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns); +/** + * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock. 
+ * + * To keep it NMI safe since we're accessing from tracing, we're not using a + * separate timekeeper with updates to monotonic clock and boot offset + * protected with seqlocks. This has the following minor side effects: + * + * (1) Its possible that a timestamp be taken after the boot offset is updated + * but before the timekeeper is updated. If this happens, the new boot offset + * is added to the old timekeeping making the clock appear to update slightly + * earlier: + * CPU 0 CPU 1 + * timekeeping_inject_sleeptime64() + * __timekeeping_inject_sleeptime(tk, delta); + * timestamp(); + * timekeeping_update(tk, TK_CLEAR_NTP...); + * + * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be + * partially updated. Since the tk->offs_boot update is a rare event, this + * should be a rare occurrence which postprocessing should be able to handle. + */ +u64 notrace ktime_get_boot_fast_ns(void) +{ + struct timekeeper *tk = &tk_core.timekeeper; + + return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot)); +} +EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns); + + /* * See comment for __ktime_get_fast_ns() vs. timestamp ordering */ @@ -764,6 +789,7 @@ EXPORT_SYMBOL_GPL(ktime_get_resolution_ns); static ktime_t *offsets[TK_OFFS_MAX] = { [TK_OFFS_REAL] = &tk_core.timekeeper.offs_real, + [TK_OFFS_BOOT] = &tk_core.timekeeper.offs_boot, [TK_OFFS_TAI] = &tk_core.timekeeper.offs_tai, }; @@ -861,39 +887,6 @@ void ktime_get_ts64(struct timespec64 *ts) EXPORT_SYMBOL_GPL(ktime_get_ts64); /** - * ktime_get_active_ts64 - Get the active non-suspended monotonic clock - * @ts: pointer to timespec variable - * - * The function calculates the monotonic clock from the realtime clock and - * the wall_to_monotonic offset, subtracts the accumulated suspend time and - * stores the result in normalized timespec64 format in the variable - * pointed to by @ts. 
- */ -void ktime_get_active_ts64(struct timespec64 *ts) -{ - struct timekeeper *tk = &tk_core.timekeeper; - struct timespec64 tomono, tsusp; - u64 nsec, nssusp; - unsigned int seq; - - WARN_ON(timekeeping_suspended); - - do { - seq = read_seqcount_begin(&tk_core.seq); - ts->tv_sec = tk->xtime_sec; - nsec = timekeeping_get_ns(&tk->tkr_mono); - tomono = tk->wall_to_monotonic; - nssusp = tk->time_suspended; - } while (read_seqcount_retry(&tk_core.seq, seq)); - - ts->tv_sec += tomono.tv_sec; - ts->tv_nsec = 0; - timespec64_add_ns(ts, nsec + tomono.tv_nsec); - tsusp = ns_to_timespec64(nssusp); - *ts = timespec64_sub(*ts, tsusp); -} - -/** * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC * * Returns the seconds portion of CLOCK_MONOTONIC with a single non @@ -1593,6 +1586,7 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk, return; } tk_xtime_add(tk, delta); + tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta)); tk_update_sleep_time(tk, timespec64_to_ktime(*delta)); tk_debug_account_sleep_time(delta); } @@ -2125,7 +2119,7 @@ out: void getboottime64(struct timespec64 *ts) { struct timekeeper *tk = &tk_core.timekeeper; - ktime_t t = ktime_sub(tk->offs_real, tk->time_suspended); + ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot); *ts = ktime_to_timespec64(t); } @@ -2139,13 +2133,6 @@ unsigned long get_seconds(void) } EXPORT_SYMBOL(get_seconds); -struct timespec __current_kernel_time(void) -{ - struct timekeeper *tk = &tk_core.timekeeper; - - return timespec64_to_timespec(tk_xtime(tk)); -} - struct timespec64 current_kernel_time64(void) { struct timekeeper *tk = &tk_core.timekeeper; @@ -2195,6 +2182,7 @@ void do_timer(unsigned long ticks) * ktime_get_update_offsets_now - hrtimer helper * @cwsseq: pointer to check and store the clock was set sequence number * @offs_real: pointer to storage for monotonic -> realtime offset + * @offs_boot: pointer to storage for monotonic -> boottime offset * @offs_tai: pointer to storage for monotonic -> clock tai offset * * Returns current monotonic time and updates the offsets if the @@ -2204,7 +2192,7 @@ void do_timer(unsigned long ticks) * Called from hrtimer_interrupt() or retrigger_next_event() */ ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real, - ktime_t *offs_tai) + ktime_t *offs_boot, ktime_t *offs_tai) { struct timekeeper *tk = &tk_core.timekeeper; unsigned int seq; @@ -2221,6 +2209,7 @@ ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real, if (*cwsseq != tk->clock_was_set_seq) { *cwsseq = tk->clock_was_set_seq; *offs_real = tk->offs_real; + *offs_boot = tk->offs_boot; *offs_tai = tk->offs_tai; } diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h index 79b67f5e0343..7a9b4eb7a1d5 100644 --- a/kernel/time/timekeeping.h +++ b/kernel/time/timekeeping.h @@ -6,6 +6,7 @@ */ extern ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real, + ktime_t *offs_boot, ktime_t *offs_tai); extern int timekeeping_valid_for_hres(void); diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index d88e96d4e12c..56ba0f2a01db 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -977,6 +977,7 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info) { struct perf_event_query_bpf __user *uquery = info; struct perf_event_query_bpf query = {}; + u32 *ids, prog_cnt, ids_len; int ret; if (!capable(CAP_SYS_ADMIN)) @@ -985,16 +986,32 @@ int perf_event_query_prog_array(struct perf_event *event, 
void __user *info) return -EINVAL; if (copy_from_user(&query, uquery, sizeof(query))) return -EFAULT; - if (query.ids_len > BPF_TRACE_MAX_PROGS) + + ids_len = query.ids_len; + if (ids_len > BPF_TRACE_MAX_PROGS) return -E2BIG; + ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN); + if (!ids) + return -ENOMEM; + /* + * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which + * is required when user only wants to check for uquery->prog_cnt. + * There is no need to check for it since the case is handled + * gracefully in bpf_prog_array_copy_info. + */ mutex_lock(&bpf_event_mutex); ret = bpf_prog_array_copy_info(event->tp_event->prog_array, - uquery->ids, - query.ids_len, - &uquery->prog_cnt); + ids, + ids_len, + &prog_cnt); mutex_unlock(&bpf_event_mutex); + if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) || + copy_to_user(uquery->ids, ids, ids_len * sizeof(u32))) + ret = -EFAULT; + + kfree(ids); return ret; } diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 16bbf062018f..8d83bcf9ef69 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -5514,10 +5514,10 @@ static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer) ftrace_create_filter_files(&global_ops, d_tracer); #ifdef CONFIG_FUNCTION_GRAPH_TRACER - trace_create_file("set_graph_function", 0444, d_tracer, + trace_create_file("set_graph_function", 0644, d_tracer, NULL, &ftrace_graph_fops); - trace_create_file("set_graph_notrace", 0444, d_tracer, + trace_create_file("set_graph_notrace", 0644, d_tracer, NULL, &ftrace_graph_notrace_fops); #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index dfbcf9ee1447..414d7210b2ec 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1165,7 +1165,7 @@ static struct { { trace_clock, "perf", 1 }, { ktime_get_mono_fast_ns, "mono", 1 }, { ktime_get_raw_fast_ns, "mono_raw", 1 }, - { ktime_get_mono_fast_ns, "boot", 1 }, + { ktime_get_boot_fast_ns, "boot", 1 }, ARCH_TRACE_CLOCKS }; diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h index e954ae3d82c0..e3a658bac10f 100644 --- a/kernel/trace/trace_entries.h +++ b/kernel/trace/trace_entries.h @@ -356,7 +356,7 @@ FTRACE_ENTRY(hwlat, hwlat_entry, __field( unsigned int, seqnum ) ), - F_printk("cnt:%u\tts:%010llu.%010lu\tinner:%llu\touter:%llunmi-ts:%llu\tnmi-count:%u\n", + F_printk("cnt:%u\tts:%010llu.%010lu\tinner:%llu\touter:%llu\tnmi-ts:%llu\tnmi-count:%u\n", __entry->seqnum, __entry->tv_sec, __entry->tv_nsec, diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 9b4716bb8bb0..7d306b74230f 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -762,6 +762,9 @@ static int regex_match_full(char *str, struct regex *r, int len) static int regex_match_front(char *str, struct regex *r, int len) { + if (len < r->len) + return 0; + if (strncmp(str, r->pattern, r->len) == 0) return 1; return 0; @@ -1499,14 +1502,14 @@ static int process_preds(struct trace_event_call *call, return ret; } - if (!nr_preds) { - prog = NULL; - } else { - prog = predicate_parse(filter_string, nr_parens, nr_preds, + if (!nr_preds) + return -EINVAL; + + prog = predicate_parse(filter_string, nr_parens, nr_preds, parse_pred, call, pe); - if (IS_ERR(prog)) - return PTR_ERR(prog); - } + if (IS_ERR(prog)) + return PTR_ERR(prog); + rcu_assign_pointer(filter->prog, prog); return 0; } diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 
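[Editorial sketch] The perf_event_query_prog_array() hunk above stops writing program IDs straight into the user buffer while bpf_event_mutex is held: it fills a kcalloc()'d kernel array under the mutex and only calls copy_to_user() after unlocking, presumably so that faulting in user pages cannot happen under the lock. A hedged sketch of that general pattern follows; my_lock, fill_ids() and query_ids() are hypothetical stand-ins (fill_ids() plays the role of bpf_prog_array_copy_info()).

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static DEFINE_MUTEX(my_lock);		/* hypothetical lock */

static int fill_ids(u32 *ids, u32 len)	/* stand-in for the real copier */
{
	return 0;
}

static int query_ids(u32 __user *uids, u32 ids_len)
{
	u32 *ids;
	int ret;

	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;

	mutex_lock(&my_lock);
	ret = fill_ids(ids, ids_len);	/* snapshot under the lock */
	mutex_unlock(&my_lock);

	/* Touch user memory only after the mutex has been dropped. */
	if (!ret && copy_to_user(uids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}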
0d7b3ffbecc2..b9061ed59bbd 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -2466,6 +2466,7 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file, else if (strcmp(modifier, "usecs") == 0) *flags |= HIST_FIELD_FL_TIMESTAMP_USECS; else { + hist_err("Invalid field modifier: ", modifier); field = ERR_PTR(-EINVAL); goto out; } @@ -2481,6 +2482,7 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file, else { field = trace_find_event_field(file->event_call, field_name); if (!field || !field->size) { + hist_err("Couldn't find field: ", field_name); field = ERR_PTR(-EINVAL); goto out; } @@ -4913,6 +4915,16 @@ static void hist_field_print(struct seq_file *m, struct hist_field *hist_field) seq_printf(m, "%s", field_name); } else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP) seq_puts(m, "common_timestamp"); + + if (hist_field->flags) { + if (!(hist_field->flags & HIST_FIELD_FL_VAR_REF) && + !(hist_field->flags & HIST_FIELD_FL_EXPR)) { + const char *flags = get_hist_field_flags(hist_field); + + if (flags) + seq_printf(m, ".%s", flags); + } + } } static int event_hist_trigger_print(struct seq_file *m, diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 1cd3fb4d70f8..02aed76e0978 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -512,8 +512,6 @@ static int __register_trace_kprobe(struct trace_kprobe *tk) if (ret == 0) tk->tp.flags |= TP_FLAG_REGISTERED; else { - pr_warn("Could not insert probe at %s+%lu: %d\n", - trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret); if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) { pr_warn("This probe might be able to register after target module is loaded. Continue.\n"); ret = 0; diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 3c7bfc4bf5e9..4237eba4ef20 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -472,7 +472,7 @@ static __init int stack_trace_init(void) NULL, &stack_trace_fops); #ifdef CONFIG_DYNAMIC_FTRACE - trace_create_file("stack_trace_filter", 0444, d_tracer, + trace_create_file("stack_trace_filter", 0644, d_tracer, &trace_ops, &stack_trace_filter_fops); #endif diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 34fd0e0ec51d..ac892878dbe6 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -55,6 +55,7 @@ struct trace_uprobe { struct list_head list; struct trace_uprobe_filter filter; struct uprobe_consumer consumer; + struct path path; struct inode *inode; char *filename; unsigned long offset; @@ -289,7 +290,7 @@ static void free_trace_uprobe(struct trace_uprobe *tu) for (i = 0; i < tu->tp.nr_args; i++) traceprobe_free_probe_arg(&tu->tp.args[i]); - iput(tu->inode); + path_put(&tu->path); kfree(tu->tp.call.class->system); kfree(tu->tp.call.name); kfree(tu->filename); @@ -363,7 +364,6 @@ end: static int create_trace_uprobe(int argc, char **argv) { struct trace_uprobe *tu; - struct inode *inode; char *arg, *event, *group, *filename; char buf[MAX_EVENT_NAME_LEN]; struct path path; @@ -371,7 +371,6 @@ static int create_trace_uprobe(int argc, char **argv) bool is_delete, is_return; int i, ret; - inode = NULL; ret = 0; is_delete = false; is_return = false; @@ -437,21 +436,16 @@ static int create_trace_uprobe(int argc, char **argv) } /* Find the last occurrence, in case the path contains ':' too. 
*/ arg = strrchr(argv[1], ':'); - if (!arg) { - ret = -EINVAL; - goto fail_address_parse; - } + if (!arg) + return -EINVAL; *arg++ = '\0'; filename = argv[1]; ret = kern_path(filename, LOOKUP_FOLLOW, &path); if (ret) - goto fail_address_parse; - - inode = igrab(d_real_inode(path.dentry)); - path_put(&path); + return ret; - if (!inode || !S_ISREG(inode->i_mode)) { + if (!d_is_reg(path.dentry)) { ret = -EINVAL; goto fail_address_parse; } @@ -490,7 +484,7 @@ static int create_trace_uprobe(int argc, char **argv) goto fail_address_parse; } tu->offset = offset; - tu->inode = inode; + tu->path = path; tu->filename = kstrdup(filename, GFP_KERNEL); if (!tu->filename) { @@ -558,7 +552,7 @@ error: return ret; fail_address_parse: - iput(inode); + path_put(&path); pr_info("Failed to parse address or file.\n"); @@ -922,6 +916,7 @@ probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file, goto err_flags; tu->consumer.filter = filter; + tu->inode = d_real_inode(tu->path.dentry); ret = uprobe_register(tu->inode, tu->offset, &tu->consumer); if (ret) goto err_buffer; @@ -967,6 +962,7 @@ probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file) WARN_ON(!uprobe_filter_is_empty(&tu->filter)); uprobe_unregister(tu->inode, tu->offset, &tu->consumer); + tu->inode = NULL; tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE; uprobe_buffer_disable(); @@ -1337,7 +1333,6 @@ struct trace_event_call * create_local_trace_uprobe(char *name, unsigned long offs, bool is_return) { struct trace_uprobe *tu; - struct inode *inode; struct path path; int ret; @@ -1345,11 +1340,8 @@ create_local_trace_uprobe(char *name, unsigned long offs, bool is_return) if (ret) return ERR_PTR(ret); - inode = igrab(d_inode(path.dentry)); - path_put(&path); - - if (!inode || !S_ISREG(inode->i_mode)) { - iput(inode); + if (!d_is_reg(path.dentry)) { + path_put(&path); return ERR_PTR(-EINVAL); } @@ -1364,11 +1356,12 @@ create_local_trace_uprobe(char *name, unsigned long offs, bool is_return) if (IS_ERR(tu)) { pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu)); + path_put(&path); return ERR_CAST(tu); } tu->offset = offs; - tu->inode = inode; + tu->path = path; tu->filename = kstrdup(name, GFP_KERNEL); init_trace_event_call(tu, &tu->tp.call); diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index 671b13457387..1e37da2e0c25 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -207,7 +207,7 @@ static int tracepoint_add_func(struct tracepoint *tp, lockdep_is_held(&tracepoints_mutex)); old = func_add(&tp_funcs, func, prio); if (IS_ERR(old)) { - WARN_ON_ONCE(1); + WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM); return PTR_ERR(old); } @@ -239,7 +239,7 @@ static int tracepoint_remove_func(struct tracepoint *tp, lockdep_is_held(&tracepoints_mutex)); old = func_remove(&tp_funcs, func); if (IS_ERR(old)) { - WARN_ON_ONCE(1); + WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM); return PTR_ERR(old); } diff --git a/lib/dma-direct.c b/lib/dma-direct.c index c0bba30fef0a..bbfb229aa067 100644 --- a/lib/dma-direct.c +++ b/lib/dma-direct.c @@ -84,7 +84,8 @@ again: __free_pages(page, page_order); page = NULL; - if (dev->coherent_dma_mask < DMA_BIT_MASK(32) && + if (IS_ENABLED(CONFIG_ZONE_DMA) && + dev->coherent_dma_mask < DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) { gfp = (gfp & ~GFP_DMA32) | GFP_DMA; goto again; diff --git a/lib/errseq.c b/lib/errseq.c index df782418b333..81f9e33aa7e7 100644 --- a/lib/errseq.c +++ b/lib/errseq.c @@ -111,27 +111,22 @@ EXPORT_SYMBOL(errseq_set); * errseq_sample() - Grab current errseq_t value. 
* @eseq: Pointer to errseq_t to be sampled. * - * This function allows callers to sample an errseq_t value, marking it as - * "seen" if required. + * This function allows callers to initialise their errseq_t variable. + * If the error has been "seen", new callers will not see an old error. + * If there is an unseen error in @eseq, the caller of this function will + * see it the next time it checks for an error. * + * Context: Any context. * Return: The current errseq value. */ errseq_t errseq_sample(errseq_t *eseq) { errseq_t old = READ_ONCE(*eseq); - errseq_t new = old; - /* - * For the common case of no errors ever having been set, we can skip - * marking the SEEN bit. Once an error has been set, the value will - * never go back to zero. - */ - if (old != 0) { - new |= ERRSEQ_SEEN; - if (old != new) - cmpxchg(eseq, old, new); - } - return new; + /* If nobody has seen this error yet, then we can be the first. */ + if (!(old & ERRSEQ_SEEN)) + old = 0; + return old; } EXPORT_SYMBOL(errseq_sample); diff --git a/lib/find_bit_benchmark.c b/lib/find_bit_benchmark.c index 5985a25e6cbc..5367ffa5c18f 100644 --- a/lib/find_bit_benchmark.c +++ b/lib/find_bit_benchmark.c @@ -132,7 +132,12 @@ static int __init find_bit_test(void) test_find_next_bit(bitmap, BITMAP_LEN); test_find_next_zero_bit(bitmap, BITMAP_LEN); test_find_last_bit(bitmap, BITMAP_LEN); - test_find_first_bit(bitmap, BITMAP_LEN); + + /* + * test_find_first_bit() may take some time, so + * traverse only part of bitmap to avoid soft lockup. + */ + test_find_first_bit(bitmap, BITMAP_LEN / 10); test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN); pr_err("\nStart testing find_bit() with sparse bitmap\n"); diff --git a/lib/kobject.c b/lib/kobject.c index e1d1f290bf35..18989b5b3b56 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -233,13 +233,12 @@ static int kobject_add_internal(struct kobject *kobj) /* be noisy on error issues */ if (error == -EEXIST) - WARN(1, - "%s failed for %s with -EEXIST, don't try to register things with the same name in the same directory.\n", - __func__, kobject_name(kobj)); + pr_err("%s failed for %s with -EEXIST, don't try to register things with the same name in the same directory.\n", + __func__, kobject_name(kobj)); else - WARN(1, "%s failed for %s (error: %d parent: %s)\n", - __func__, kobject_name(kobj), error, - parent ? kobject_name(parent) : "'none'"); + pr_err("%s failed for %s (error: %d parent: %s)\n", + __func__, kobject_name(kobj), error, + parent ? 
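[Editorial sketch] The errseq_sample() rewrite above changes its contract: rather than marking a pending error as SEEN, it returns 0 when nobody has observed the error yet, so a later check against the sampled value still reports it. A hedged sketch of the sample/check pairing this is aimed at; my_ctx, my_open() and my_sync() are hypothetical, while errseq_sample() and errseq_check_and_advance() are the real errseq API.

#include <linux/errseq.h>
#include <linux/errno.h>

struct my_ctx {
	errseq_t since;			/* per-"open" error cursor */
};

static void my_open(struct my_ctx *ctx, errseq_t *wb_err)
{
	/* With the new semantics this returns 0 if an error is pending
	 * but unseen, so my_sync() below will still report it. */
	ctx->since = errseq_sample(wb_err);
}

static int my_sync(struct my_ctx *ctx, errseq_t *wb_err)
{
	/* Reports (and consumes) any error recorded since my_open(). */
	return errseq_check_and_advance(wb_err, &ctx->since);
}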
kobject_name(parent) : "'none'"); } else kobj->state_in_sysfs = 1; diff --git a/lib/swiotlb.c b/lib/swiotlb.c index fece57566d45..cc640588f145 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -714,7 +714,7 @@ swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle, phys_addr = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start), - 0, size, DMA_FROM_DEVICE, 0); + 0, size, DMA_FROM_DEVICE, attrs); if (phys_addr == SWIOTLB_MAP_ERROR) goto out_warn; @@ -737,7 +737,7 @@ out_unmap: swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); out_warn: - if ((attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) { + if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) { dev_warn(dev, "swiotlb: coherent allocation failed, size=%zu\n", size); diff --git a/lib/textsearch.c b/lib/textsearch.c index 0b79908dfe89..5939549c0e7b 100644 --- a/lib/textsearch.c +++ b/lib/textsearch.c @@ -10,7 +10,10 @@ * Pablo Neira Ayuso <pablo@netfilter.org> * * ========================================================================== - * + */ + +/** + * DOC: ts_intro * INTRODUCTION * * The textsearch infrastructure provides text searching facilities for @@ -19,7 +22,9 @@ * * ARCHITECTURE * - * User + * .. code-block:: none + * + * User * +----------------+ * | finish()|<--------------(6)-----------------+ * |get_next_block()|<--------------(5)---------------+ | @@ -33,21 +38,21 @@ * | (3)|----->| find()/next() |-----------+ | * | (7)|----->| destroy() |----------------------+ * +----------------+ +---------------+ - * - * (1) User configures a search by calling _prepare() specifying the - * search parameters such as the pattern and algorithm name. + * + * (1) User configures a search by calling textsearch_prepare() specifying + * the search parameters such as the pattern and algorithm name. * (2) Core requests the algorithm to allocate and initialize a search * configuration according to the specified parameters. - * (3) User starts the search(es) by calling _find() or _next() to - * fetch subsequent occurrences. A state variable is provided - * to the algorithm to store persistent variables. + * (3) User starts the search(es) by calling textsearch_find() or + * textsearch_next() to fetch subsequent occurrences. A state variable + * is provided to the algorithm to store persistent variables. * (4) Core eventually resets the search offset and forwards the find() * request to the algorithm. * (5) Algorithm calls get_next_block() provided by the user continuously * to fetch the data to be searched in block by block. * (6) Algorithm invokes finish() after the last call to get_next_block * to clean up any leftovers from get_next_block. (Optional) - * (7) User destroys the configuration by calling _destroy(). + * (7) User destroys the configuration by calling textsearch_destroy(). * (8) Core notifies the algorithm to destroy algorithm specific * allocations. (Optional) * @@ -62,9 +67,10 @@ * amount of times and even in parallel as long as a separate struct * ts_state variable is provided to every instance. * - * The actual search is performed by either calling textsearch_find_- - * continuous() for linear data or by providing an own get_next_block() - * implementation and calling textsearch_find(). Both functions return + * The actual search is performed by either calling + * textsearch_find_continuous() for linear data or by providing + * an own get_next_block() implementation and + * calling textsearch_find(). 
Both functions return * the position of the first occurrence of the pattern or UINT_MAX if * no match was found. Subsequent occurrences can be found by calling * textsearch_next() regardless of the linearity of the data. @@ -72,7 +78,7 @@ * Once you're done using a configuration it must be given back via * textsearch_destroy. * - * EXAMPLE + * EXAMPLE:: * * int pos; * struct ts_config *conf; @@ -87,13 +93,13 @@ * goto errout; * } * - * pos = textsearch_find_continuous(conf, &state, example, strlen(example)); + * pos = textsearch_find_continuous(conf, \&state, example, strlen(example)); * if (pos != UINT_MAX) - * panic("Oh my god, dancing chickens at %d\n", pos); + * panic("Oh my god, dancing chickens at \%d\n", pos); * * textsearch_destroy(conf); - * ========================================================================== */ +/* ========================================================================== */ #include <linux/module.h> #include <linux/types.h> @@ -225,7 +231,7 @@ static unsigned int get_linear_data(unsigned int consumed, const u8 **dst, * * Returns the position of first occurrence of the pattern or * %UINT_MAX if no occurrence was found. - */ + */ unsigned int textsearch_find_continuous(struct ts_config *conf, struct ts_state *state, const void *data, unsigned int len) diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 023190c69dce..7441bd93b732 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -115,6 +115,7 @@ static int bdi_debug_register(struct backing_dev_info *bdi, const char *name) bdi, &bdi_debug_stats_fops); if (!bdi->debug_stats) { debugfs_remove(bdi->debug_dir); + bdi->debug_dir = NULL; return -ENOMEM; } @@ -383,7 +384,7 @@ static void wb_shutdown(struct bdi_writeback *wb) * the barrier provided by test_and_clear_bit() above. 
*/ smp_wmb(); - clear_bit(WB_shutting_down, &wb->state); + clear_and_wake_up_bit(WB_shutting_down, &wb->state); } static void wb_exit(struct bdi_writeback *wb) diff --git a/mm/filemap.c b/mm/filemap.c index 9276bdb2343c..0604cb02e6f3 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -786,7 +786,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) VM_BUG_ON_PAGE(!PageLocked(new), new); VM_BUG_ON_PAGE(new->mapping, new); - error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); + error = radix_tree_preload(gfp_mask & GFP_RECLAIM_MASK); if (!error) { struct address_space *mapping = old->mapping; void (*freepage)(struct page *); @@ -842,7 +842,7 @@ static int __add_to_page_cache_locked(struct page *page, return error; } - error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM); + error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK); if (error) { if (!huge) mem_cgroup_cancel_charge(page, memcg, false); @@ -1585,8 +1585,7 @@ no_page: if (fgp_flags & FGP_ACCESSED) __SetPageReferenced(page); - err = add_to_page_cache_lru(page, mapping, offset, - gfp_mask & GFP_RECLAIM_MASK); + err = add_to_page_cache_lru(page, mapping, offset, gfp_mask); if (unlikely(err)) { put_page(page); page = NULL; @@ -2387,7 +2386,7 @@ static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask) if (!page) return -ENOMEM; - ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask & GFP_KERNEL); + ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask); if (ret == 0) ret = mapping->a_ops->readpage(file, page); else if (ret == -EEXIST) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 14ed6ee5e02f..a3a1815f8e11 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2925,7 +2925,10 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) pmde = maybe_pmd_mkwrite(pmde, vma); flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE); - page_add_anon_rmap(new, vma, mmun_start, true); + if (PageAnon(new)) + page_add_anon_rmap(new, vma, mmun_start, true); + else + page_add_file_rmap(new, true); set_pmd_at(mm, mmun_start, pvmw->pmd, pmde); if (vma->vm_flags & VM_LOCKED) mlock_vma_page(new); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e074f7c637aa..2bd3df3d101a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2192,7 +2192,7 @@ static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, { struct memcg_kmem_cache_create_work *cw; - cw = kmalloc(sizeof(*cw), GFP_NOWAIT); + cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN); if (!cw) return; diff --git a/mm/migrate.c b/mm/migrate.c index f65dd69e1fd1..8c0af0f7cab1 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -472,7 +472,7 @@ int migrate_page_move_mapping(struct address_space *mapping, pslot = radix_tree_lookup_slot(&mapping->i_pages, page_index(page)); - expected_count += 1 + page_has_private(page); + expected_count += hpage_nr_pages(page) + page_has_private(page); if (page_count(page) != expected_count || radix_tree_deref_slot_protected(pslot, &mapping->i_pages.xa_lock) != page) { @@ -505,7 +505,7 @@ int migrate_page_move_mapping(struct address_space *mapping, */ newpage->index = page->index; newpage->mapping = page->mapping; - get_page(newpage); /* add cache reference */ + page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */ if (PageSwapBacked(page)) { __SetPageSwapBacked(newpage); if (PageSwapCache(page)) { @@ -524,13 +524,24 @@ int migrate_page_move_mapping(struct address_space *mapping, } radix_tree_replace_slot(&mapping->i_pages, 
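[Editorial sketch] The backing-dev hunk above replaces a bare clear_bit(WB_shutting_down) with clear_and_wake_up_bit(), which clears the flag and also wakes any task sleeping in a wait_on_bit() variant on it. A minimal sketch of that wait/clear pairing; FLAG_BUSY, my_flags and the two helpers are hypothetical, while wait_on_bit_lock() and clear_and_wake_up_bit() are the real wait_bit API.

#include <linux/wait_bit.h>
#include <linux/sched.h>

#define FLAG_BUSY	0		/* hypothetical bit number */

static unsigned long my_flags;

static void my_begin(void)
{
	/* Sleep until the bit is clear, then set it. */
	wait_on_bit_lock(&my_flags, FLAG_BUSY, TASK_UNINTERRUPTIBLE);
}

static void my_end(void)
{
	/* A plain clear_bit() here could leave a my_begin() caller
	 * sleeping forever; this clears the bit and wakes waiters. */
	clear_and_wake_up_bit(FLAG_BUSY, &my_flags);
}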
pslot, newpage); + if (PageTransHuge(page)) { + int i; + int index = page_index(page); + + for (i = 1; i < HPAGE_PMD_NR; i++) { + pslot = radix_tree_lookup_slot(&mapping->i_pages, + index + i); + radix_tree_replace_slot(&mapping->i_pages, pslot, + newpage + i); + } + } /* * Drop cache reference from old page by unfreezing * to one less reference. * We know this isn't the last reference. */ - page_ref_unfreeze(page, expected_count - 1); + page_ref_unfreeze(page, expected_count - hpage_nr_pages(page)); xa_unlock(&mapping->i_pages); /* Leave irq disabled to prevent preemption while updating stats */ @@ -1622,6 +1633,9 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, current_node = NUMA_NO_NODE; } out_flush: + if (list_empty(&pagelist)) + return err; + /* Make sure we do not overwrite the existing error */ err1 = do_move_pages_to_node(mm, &pagelist, current_node); if (!err1) diff --git a/mm/mmap.c b/mm/mmap.c index 188f195883b9..78e14facdb6e 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -100,11 +100,20 @@ pgprot_t protection_map[16] __ro_after_init = { __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111 }; +#ifndef CONFIG_ARCH_HAS_FILTER_PGPROT +static inline pgprot_t arch_filter_pgprot(pgprot_t prot) +{ + return prot; +} +#endif + pgprot_t vm_get_page_prot(unsigned long vm_flags) { - return __pgprot(pgprot_val(protection_map[vm_flags & + pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) | pgprot_val(arch_vm_get_page_prot(vm_flags))); + + return arch_filter_pgprot(ret); } EXPORT_SYMBOL(vm_get_page_prot); @@ -1315,6 +1324,35 @@ static inline int mlock_future_check(struct mm_struct *mm, return 0; } +static inline u64 file_mmap_size_max(struct file *file, struct inode *inode) +{ + if (S_ISREG(inode->i_mode)) + return inode->i_sb->s_maxbytes; + + if (S_ISBLK(inode->i_mode)) + return MAX_LFS_FILESIZE; + + /* Special "we do even unsigned file positions" case */ + if (file->f_mode & FMODE_UNSIGNED_OFFSET) + return 0; + + /* Yes, random drivers might want more. But I'm tired of buggy drivers */ + return ULONG_MAX; +} + +static inline bool file_mmap_ok(struct file *file, struct inode *inode, + unsigned long pgoff, unsigned long len) +{ + u64 maxsize = file_mmap_size_max(file, inode); + + if (maxsize && len > maxsize) + return false; + maxsize -= len; + if (pgoff > maxsize >> PAGE_SHIFT) + return false; + return true; +} + /* * The caller must hold down_write(¤t->mm->mmap_sem). */ @@ -1400,6 +1438,9 @@ unsigned long do_mmap(struct file *file, unsigned long addr, struct inode *inode = file_inode(file); unsigned long flags_mask; + if (!file_mmap_ok(file, inode, pgoff, len)) + return -EOVERFLOW; + flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags; switch (flags & MAP_TYPE) { @@ -3015,6 +3056,32 @@ void exit_mmap(struct mm_struct *mm) /* mm's last user has gone, and its about to be pulled down */ mmu_notifier_release(mm); + if (unlikely(mm_is_oom_victim(mm))) { + /* + * Manually reap the mm to free as much memory as possible. + * Then, as the oom reaper does, set MMF_OOM_SKIP to disregard + * this mm from further consideration. Taking mm->mmap_sem for + * write after setting MMF_OOM_SKIP will guarantee that the oom + * reaper will not run on this mm again after mmap_sem is + * dropped. + * + * Nothing can be holding mm->mmap_sem here and the above call + * to mmu_notifier_release(mm) ensures mmu notifier callbacks in + * __oom_reap_task_mm() will not block. 
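[Editorial sketch] The new file_mmap_size_max()/file_mmap_ok() pair above makes do_mmap() refuse mappings whose implied file range would overflow or run past the filesystem's s_maxbytes, returning -EOVERFLOW. A rough userspace illustration of the kind of request that is now rejected (illustrative only; the exact errno may also be EINVAL depending on which check fires first, and the file path is arbitrary):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/etc/hostname", O_RDONLY);
	size_t len = 1UL << 20;
	/* A page-aligned offset near the top of the offset space: the
	 * implied range [off, off + len) cannot lie below s_maxbytes. */
	off_t off = (off_t)1 << 62;
	void *p;

	if (fd < 0)
		return 1;

	p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, off);
	if (p == MAP_FAILED)
		perror("mmap");		/* expected: EOVERFLOW */
	else
		munmap(p, len);

	close(fd);
	return 0;
}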
+ * + * This needs to be done before calling munlock_vma_pages_all(), + * which clears VM_LOCKED, otherwise the oom reaper cannot + * reliably test it. + */ + mutex_lock(&oom_lock); + __oom_reap_task_mm(mm); + mutex_unlock(&oom_lock); + + set_bit(MMF_OOM_SKIP, &mm->flags); + down_write(&mm->mmap_sem); + up_write(&mm->mmap_sem); + } + if (mm->locked_vm) { vma = mm->mmap; while (vma) { @@ -3036,24 +3103,6 @@ void exit_mmap(struct mm_struct *mm) /* update_hiwater_rss(mm) here? but nobody should be looking */ /* Use -1 here to ensure all VMAs in the mm are unmapped */ unmap_vmas(&tlb, vma, 0, -1); - - if (unlikely(mm_is_oom_victim(mm))) { - /* - * Wait for oom_reap_task() to stop working on this - * mm. Because MMF_OOM_SKIP is already set before - * calling down_read(), oom_reap_task() will not run - * on this "mm" post up_write(). - * - * mm_is_oom_victim() cannot be set from under us - * either because victim->mm is already set to NULL - * under task_lock before calling mmput and oom_mm is - * set not NULL by the OOM killer only if victim->mm - * is found not NULL while holding the task_lock. - */ - set_bit(MMF_OOM_SKIP, &mm->flags); - down_write(&mm->mmap_sem); - up_write(&mm->mmap_sem); - } free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING); tlb_finish_mmu(&tlb, 0, -1); diff --git a/mm/oom_kill.c b/mm/oom_kill.c index ff992fa8760a..8ba6cb88cf58 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -469,7 +469,6 @@ bool process_shares_mm(struct task_struct *p, struct mm_struct *mm) return false; } - #ifdef CONFIG_MMU /* * OOM Reaper kernel thread which tries to reap the memory used by the OOM @@ -480,16 +479,54 @@ static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait); static struct task_struct *oom_reaper_list; static DEFINE_SPINLOCK(oom_reaper_lock); -static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm) +void __oom_reap_task_mm(struct mm_struct *mm) { - struct mmu_gather tlb; struct vm_area_struct *vma; + + /* + * Tell all users of get_user/copy_from_user etc... that the content + * is no longer stable. No barriers really needed because unmapping + * should imply barriers already and the reader would hit a page fault + * if it stumbled over a reaped memory. + */ + set_bit(MMF_UNSTABLE, &mm->flags); + + for (vma = mm->mmap ; vma; vma = vma->vm_next) { + if (!can_madv_dontneed_vma(vma)) + continue; + + /* + * Only anonymous pages have a good chance to be dropped + * without additional steps which we cannot afford as we + * are OOM already. + * + * We do not even care about fs backed pages because all + * which are reclaimable have already been reclaimed and + * we do not want to block exit_mmap by keeping mm ref + * count elevated without a good reason. 
+ */ + if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) { + const unsigned long start = vma->vm_start; + const unsigned long end = vma->vm_end; + struct mmu_gather tlb; + + tlb_gather_mmu(&tlb, mm, start, end); + mmu_notifier_invalidate_range_start(mm, start, end); + unmap_page_range(&tlb, vma, start, end, NULL); + mmu_notifier_invalidate_range_end(mm, start, end); + tlb_finish_mmu(&tlb, start, end); + } + } +} + +static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm) +{ bool ret = true; /* * We have to make sure to not race with the victim exit path * and cause premature new oom victim selection: - * __oom_reap_task_mm exit_mm + * oom_reap_task_mm exit_mm * mmget_not_zero * mmput * atomic_dec_and_test @@ -534,39 +571,8 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm) trace_start_task_reaping(tsk->pid); - /* - * Tell all users of get_user/copy_from_user etc... that the content - * is no longer stable. No barriers really needed because unmapping - * should imply barriers already and the reader would hit a page fault - * if it stumbled over a reaped memory. - */ - set_bit(MMF_UNSTABLE, &mm->flags); - - for (vma = mm->mmap ; vma; vma = vma->vm_next) { - if (!can_madv_dontneed_vma(vma)) - continue; + __oom_reap_task_mm(mm); - /* - * Only anonymous pages have a good chance to be dropped - * without additional steps which we cannot afford as we - * are OOM already. - * - * We do not even care about fs backed pages because all - * which are reclaimable have already been reclaimed and - * we do not want to block exit_mmap by keeping mm ref - * count elevated without a good reason. - */ - if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) { - const unsigned long start = vma->vm_start; - const unsigned long end = vma->vm_end; - - tlb_gather_mmu(&tlb, mm, start, end); - mmu_notifier_invalidate_range_start(mm, start, end); - unmap_page_range(&tlb, vma, start, end, NULL); - mmu_notifier_invalidate_range_end(mm, start, end); - tlb_finish_mmu(&tlb, start, end); - } - } pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n", task_pid_nr(tsk), tsk->comm, K(get_mm_counter(mm, MM_ANONPAGES)), @@ -587,14 +593,13 @@ static void oom_reap_task(struct task_struct *tsk) struct mm_struct *mm = tsk->signal->oom_mm; /* Retry the down_read_trylock(mmap_sem) a few times */ - while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task_mm(tsk, mm)) + while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm)) schedule_timeout_idle(HZ/10); if (attempts <= MAX_OOM_REAP_RETRIES || test_bit(MMF_OOM_SKIP, &mm->flags)) goto done; - pr_info("oom_reaper: unable to reap pid:%d (%s)\n", task_pid_nr(tsk), tsk->comm); debug_show_all_locks(); diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 5c1a3279e63f..337c6afb3345 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2502,13 +2502,13 @@ void account_page_redirty(struct page *page) if (mapping && mapping_cap_account_dirty(mapping)) { struct inode *inode = mapping->host; struct bdi_writeback *wb; - bool locked; + struct wb_lock_cookie cookie = {}; - wb = unlocked_inode_to_wb_begin(inode, &locked); + wb = unlocked_inode_to_wb_begin(inode, &cookie); current->nr_dirtied--; dec_node_page_state(page, NR_DIRTIED); dec_wb_stat(wb, WB_DIRTIED); - unlocked_inode_to_wb_end(inode, locked); + unlocked_inode_to_wb_end(inode, &cookie); } } EXPORT_SYMBOL(account_page_redirty); @@ -2614,15 +2614,15 @@ void __cancel_dirty_page(struct page *page) if 
(mapping_cap_account_dirty(mapping)) { struct inode *inode = mapping->host; struct bdi_writeback *wb; - bool locked; + struct wb_lock_cookie cookie = {}; lock_page_memcg(page); - wb = unlocked_inode_to_wb_begin(inode, &locked); + wb = unlocked_inode_to_wb_begin(inode, &cookie); if (TestClearPageDirty(page)) account_page_cleaned(page, mapping, wb); - unlocked_inode_to_wb_end(inode, locked); + unlocked_inode_to_wb_end(inode, &cookie); unlock_page_memcg(page); } else { ClearPageDirty(page); @@ -2654,7 +2654,7 @@ int clear_page_dirty_for_io(struct page *page) if (mapping && mapping_cap_account_dirty(mapping)) { struct inode *inode = mapping->host; struct bdi_writeback *wb; - bool locked; + struct wb_lock_cookie cookie = {}; /* * Yes, Virginia, this is indeed insane. @@ -2691,14 +2691,14 @@ int clear_page_dirty_for_io(struct page *page) * always locked coming in here, so we get the desired * exclusion. */ - wb = unlocked_inode_to_wb_begin(inode, &locked); + wb = unlocked_inode_to_wb_begin(inode, &cookie); if (TestClearPageDirty(page)) { dec_lruvec_page_state(page, NR_FILE_DIRTY); dec_zone_page_state(page, NR_ZONE_WRITE_PENDING); dec_wb_stat(wb, WB_RECLAIMABLE); ret = 1; } - unlocked_inode_to_wb_end(inode, locked); + unlocked_inode_to_wb_end(inode, &cookie); return ret; } return TestClearPageDirty(page); diff --git a/mm/rmap.c b/mm/rmap.c index f0dd4e4565bc..8d5337fed37b 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1374,9 +1374,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, if (!pvmw.pte && (flags & TTU_MIGRATION)) { VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page); - if (!PageAnon(page)) - continue; - set_pmd_migration_entry(&pvmw, page); continue; } diff --git a/mm/sparse.c b/mm/sparse.c index 62eef264a7bd..73dc2fcc0eab 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -629,7 +629,7 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn) unsigned long pfn; for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { - unsigned long section_nr = pfn_to_section_nr(start_pfn); + unsigned long section_nr = pfn_to_section_nr(pfn); struct mem_section *ms; /* diff --git a/mm/vmscan.c b/mm/vmscan.c index 8b920ce3ae02..9b697323a88c 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -303,7 +303,7 @@ unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone /* * Add a shrinker callback to be called from the vm. 
*/ -int register_shrinker(struct shrinker *shrinker) +int prealloc_shrinker(struct shrinker *shrinker) { size_t size = sizeof(*shrinker->nr_deferred); @@ -313,10 +313,29 @@ int register_shrinker(struct shrinker *shrinker) shrinker->nr_deferred = kzalloc(size, GFP_KERNEL); if (!shrinker->nr_deferred) return -ENOMEM; + return 0; +} + +void free_prealloced_shrinker(struct shrinker *shrinker) +{ + kfree(shrinker->nr_deferred); + shrinker->nr_deferred = NULL; +} +void register_shrinker_prepared(struct shrinker *shrinker) +{ down_write(&shrinker_rwsem); list_add_tail(&shrinker->list, &shrinker_list); up_write(&shrinker_rwsem); +} + +int register_shrinker(struct shrinker *shrinker) +{ + int err = prealloc_shrinker(shrinker); + + if (err) + return err; + register_shrinker_prepared(shrinker); return 0; } EXPORT_SYMBOL(register_shrinker); diff --git a/mm/vmstat.c b/mm/vmstat.c index 536332e988b8..a2b9518980ce 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1161,7 +1161,7 @@ const char * const vmstat_text[] = { "nr_vmscan_immediate_reclaim", "nr_dirtied", "nr_written", - "nr_indirectly_reclaimable", + "", /* nr_indirectly_reclaimable */ /* enum writeback_stat_item counters */ "nr_dirty_threshold", @@ -1740,6 +1740,10 @@ static int vmstat_show(struct seq_file *m, void *arg) unsigned long *l = arg; unsigned long off = l - (unsigned long *)m->private; + /* Skip hidden vmstat items. */ + if (*vmstat_text[off] == '\0') + return 0; + seq_puts(m, vmstat_text[off]); seq_put_decimal_ull(m, " ", *l); seq_putc(m, '\n'); diff --git a/mm/z3fold.c b/mm/z3fold.c index c0bca6153b95..4b366d181f35 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c @@ -144,7 +144,8 @@ enum z3fold_page_flags { PAGE_HEADLESS = 0, MIDDLE_CHUNK_MAPPED, NEEDS_COMPACTING, - PAGE_STALE + PAGE_STALE, + UNDER_RECLAIM }; /***************** @@ -173,6 +174,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page, clear_bit(MIDDLE_CHUNK_MAPPED, &page->private); clear_bit(NEEDS_COMPACTING, &page->private); clear_bit(PAGE_STALE, &page->private); + clear_bit(UNDER_RECLAIM, &page->private); spin_lock_init(&zhdr->page_lock); kref_init(&zhdr->refcount); @@ -756,6 +758,10 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) atomic64_dec(&pool->pages_nr); return; } + if (test_bit(UNDER_RECLAIM, &page->private)) { + z3fold_page_unlock(zhdr); + return; + } if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) { z3fold_page_unlock(zhdr); return; @@ -840,6 +846,8 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) kref_get(&zhdr->refcount); list_del_init(&zhdr->buddy); zhdr->cpu = -1; + set_bit(UNDER_RECLAIM, &page->private); + break; } list_del_init(&page->lru); @@ -887,25 +895,35 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) goto next; } next: - spin_lock(&pool->lock); if (test_bit(PAGE_HEADLESS, &page->private)) { if (ret == 0) { - spin_unlock(&pool->lock); free_z3fold_page(page); return 0; } - } else if (kref_put(&zhdr->refcount, release_z3fold_page)) { - atomic64_dec(&pool->pages_nr); + spin_lock(&pool->lock); + list_add(&page->lru, &pool->lru); + spin_unlock(&pool->lock); + } else { + z3fold_page_lock(zhdr); + clear_bit(UNDER_RECLAIM, &page->private); + if (kref_put(&zhdr->refcount, + release_z3fold_page_locked)) { + atomic64_dec(&pool->pages_nr); + return 0; + } + /* + * if we are here, the page is still not completely + * free. 
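[Editorial sketch] The vmscan.c hunk above splits register_shrinker() into prealloc_shrinker() plus register_shrinker_prepared(), with free_prealloced_shrinker() for the error path, so callers can allocate the shrinker's bookkeeping while failure is still easy to unwind and publish the shrinker only once its backing object is usable. A hedged sketch of a caller using that two-phase API; my_shrinker, my_count(), my_scan(), my_cache_setup() and my_cache_create() are hypothetical, while the three registration helpers are the ones introduced above.

#include <linux/shrinker.h>
#include <linux/errno.h>

static unsigned long my_count(struct shrinker *s, struct shrink_control *sc)
{
	return 0;			/* nothing to reclaim in this sketch */
}

static unsigned long my_scan(struct shrinker *s, struct shrink_control *sc)
{
	return SHRINK_STOP;
}

static struct shrinker my_shrinker = {
	.count_objects	= my_count,
	.scan_objects	= my_scan,
	.seeks		= DEFAULT_SEEKS,
};

static int my_cache_setup(void)		/* hypothetical cache initialisation */
{
	return 0;
}

static int my_cache_create(void)
{
	int err;

	/* Phase 1: allocate shrinker bookkeeping while we can still fail. */
	err = prealloc_shrinker(&my_shrinker);
	if (err)
		return err;

	err = my_cache_setup();
	if (err) {
		/* Nothing was published yet; just drop the preallocation. */
		free_prealloced_shrinker(&my_shrinker);
		return err;
	}

	/* Phase 2: publish the shrinker only once the cache is usable. */
	register_shrinker_prepared(&my_shrinker);
	return 0;
}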
Take the global pool lock then to be able + * to add it back to the lru list + */ + spin_lock(&pool->lock); + list_add(&page->lru, &pool->lru); spin_unlock(&pool->lock); - return 0; + z3fold_page_unlock(zhdr); } - /* - * Add to the beginning of LRU. - * Pool lock has to be kept here to ensure the page has - * not already been released - */ - list_add(&page->lru, &pool->lru); + /* We started off locked to we need to lock the pool back */ + spin_lock(&pool->lock); } spin_unlock(&pool->lock); return -EAGAIN; diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c index 38aa6345bdfa..b718db2085b2 100644 --- a/net/9p/trans_common.c +++ b/net/9p/trans_common.c @@ -16,7 +16,7 @@ #include <linux/module.h> /** - * p9_release_req_pages - Release pages after the transaction. + * p9_release_pages - Release pages after the transaction. */ void p9_release_pages(struct page **pages, int nr_pages) { diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index 0cfba919d167..848969fe7979 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -1092,8 +1092,8 @@ static struct p9_trans_module p9_fd_trans = { }; /** - * p9_poll_proc - poll worker thread - * @a: thread state and arguments + * p9_poll_workfn - poll worker thread + * @work: work queue * * polls all v9fs transports for new events and queues the appropriate * work to the work queue diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c index 6d8e3031978f..3d414acb7015 100644 --- a/net/9p/trans_rdma.c +++ b/net/9p/trans_rdma.c @@ -68,8 +68,6 @@ * @pd: Protection Domain pointer * @qp: Queue Pair pointer * @cq: Completion Queue pointer - * @dm_mr: DMA Memory Region pointer - * @lkey: The local access only memory region key * @timeout: Number of uSecs to wait for connection management events * @privport: Whether a privileged port may be used * @port: The port to use @@ -632,7 +630,7 @@ static int p9_rdma_bind_privport(struct p9_trans_rdma *rdma) } /** - * trans_create_rdma - Transport method for creating atransport instance + * rdma_create_trans - Transport method for creating a transport instance * @client: client instance * @addr: IP address string * @args: Mount options string diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index 3aa5a93ad107..4d0372263e5d 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -60,7 +60,6 @@ static atomic_t vp_pinned = ATOMIC_INIT(0); /** * struct virtio_chan - per-instance transport information - * @initialized: whether the channel is initialized * @inuse: whether the channel is in use * @lock: protects multiple elements within this structure * @client: client instance @@ -385,8 +384,8 @@ static int p9_get_mapped_pages(struct virtio_chan *chan, * @uidata: user bffer that should be ued for zero copy read * @uodata: user buffer that shoud be user for zero copy write * @inlen: read buffer size - * @olen: write buffer size - * @hdrlen: reader header size, This is the size of response protocol data + * @outlen: write buffer size + * @in_hdr_len: reader header size, This is the size of response protocol data * */ static int diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c index 086a4abdfa7c..0f19960390a6 100644 --- a/net/9p/trans_xen.c +++ b/net/9p/trans_xen.c @@ -485,7 +485,7 @@ static int xen_9pfs_front_probe(struct xenbus_device *dev, static int xen_9pfs_front_resume(struct xenbus_device *dev) { - dev_warn(&dev->dev, "suspsend/resume unsupported\n"); + dev_warn(&dev->dev, "suspend/resume unsupported\n"); return 0; } diff --git a/net/atm/lec.c b/net/atm/lec.c index 01d5d20a6eb1..3138a869b5c0 
100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -41,6 +41,9 @@ static unsigned char bridge_ula_lec[] = { 0x01, 0x80, 0xc2, 0x00, 0x00 }; #include <linux/module.h> #include <linux/init.h> +/* Hardening for Spectre-v1 */ +#include <linux/nospec.h> + #include "lec.h" #include "lec_arpc.h" #include "resources.h" @@ -687,8 +690,10 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg) bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc)); if (bytes_left != 0) pr_info("copy from user failed for %d bytes\n", bytes_left); - if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF || - !dev_lec[ioc_data.dev_num]) + if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF) + return -EINVAL; + ioc_data.dev_num = array_index_nospec(ioc_data.dev_num, MAX_LEC_ITF); + if (!dev_lec[ioc_data.dev_num]) return -EINVAL; vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL); if (!vpriv) diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 82c1a6f430b3..5bb6681fa91e 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -518,8 +518,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev, return -ELOOP; } - /* Device is already being bridged */ - if (br_port_exists(dev)) + /* Device has master upper dev */ + if (netdev_master_upper_dev_get(dev)) return -EBUSY; /* No bridging devices that dislike that (e.g. wireless) */ diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 032e0fe45940..28a4c3490359 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1825,13 +1825,14 @@ static int compat_table_info(const struct ebt_table_info *info, { unsigned int size = info->entries_size; const void *entries = info->entries; - int ret; newinfo->entries_size = size; - - ret = xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries); - if (ret) - return ret; + if (info->nentries) { + int ret = xt_compat_init_offsets(NFPROTO_BRIDGE, + info->nentries); + if (ret) + return ret; + } return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info, entries, newinfo); diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index 53ecda10b790..13e2ae6be620 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c @@ -174,7 +174,7 @@ static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow, flow == CAIF_CTRLCMD_DEINIT_RSP ? "CLOSE/DEINIT" : flow == CAIF_CTRLCMD_INIT_FAIL_RSP ? "OPEN_FAIL" : flow == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ? - "REMOTE_SHUTDOWN" : "UKNOWN CTRL COMMAND"); + "REMOTE_SHUTDOWN" : "UNKNOWN CTRL COMMAND"); diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index fcb40c12b1f8..3b3d33ea9ed8 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -2569,6 +2569,11 @@ static int try_write(struct ceph_connection *con) int ret = 1; dout("try_write start %p state %lu\n", con, con->state); + if (con->state != CON_STATE_PREOPEN && + con->state != CON_STATE_CONNECTING && + con->state != CON_STATE_NEGOTIATING && + con->state != CON_STATE_OPEN) + return 0; more: dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes); @@ -2594,6 +2599,8 @@ more: } more_kvec: + BUG_ON(!con->sock); + /* kvec data queued? 
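For readers of the net/atm/lec.c hunk above: the change first rejects out-of-range dev_num values and then passes the index through array_index_nospec(), so a mispredicted bounds check cannot be used to speculatively index dev_lec[] out of range. The userspace sketch below only illustrates the idea; it is not the kernel's nospec.h code, the mask helper is a simplified stand-in for the generic fallback, and it assumes arithmetic right shift of negative values, as the kernel does on its supported compilers.

    #include <stdio.h>

    #define MAX_ITF 48

    static int table[MAX_ITF];

    /*
     * Return ~0UL if idx < size and 0 otherwise, without a conditional
     * branch, so the clamp below holds even on the speculative path.
     * (Simplified; real kernels use per-architecture implementations.)
     */
    static unsigned long index_mask(unsigned long idx, unsigned long size)
    {
        return ~(long)(idx | (size - 1UL - idx)) >> (sizeof(long) * 8 - 1);
    }

    static int lookup(long dev_num)
    {
        unsigned long idx;

        if (dev_num < 0 || dev_num >= MAX_ITF)   /* architectural check */
            return -1;
        idx = (unsigned long)dev_num;
        idx &= index_mask(idx, MAX_ITF);         /* speculation-safe clamp */
        return table[idx];
    }

    int main(void)
    {
        table[3] = 42;
        printf("%d %d\n", lookup(3), lookup(1000));
        return 0;
    }

Architecturally the mask is always all-ones after the bounds check, so behaviour is unchanged; it only matters to code running under misspeculation.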
*/ if (con->out_kvec_left) { ret = write_partial_kvec(con); diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c index b3dac24412d3..21ac6e3b96bb 100644 --- a/net/ceph/mon_client.c +++ b/net/ceph/mon_client.c @@ -209,6 +209,14 @@ static void reopen_session(struct ceph_mon_client *monc) __open_session(monc); } +static void un_backoff(struct ceph_mon_client *monc) +{ + monc->hunt_mult /= 2; /* reduce by 50% */ + if (monc->hunt_mult < 1) + monc->hunt_mult = 1; + dout("%s hunt_mult now %d\n", __func__, monc->hunt_mult); +} + /* * Reschedule delayed work timer. */ @@ -963,6 +971,7 @@ static void delayed_work(struct work_struct *work) if (!monc->hunting) { ceph_con_keepalive(&monc->con); __validate_auth(monc); + un_backoff(monc); } if (is_auth && @@ -1123,9 +1132,8 @@ static void finish_hunting(struct ceph_mon_client *monc) dout("%s found mon%d\n", __func__, monc->cur_mon); monc->hunting = false; monc->had_a_connection = true; - monc->hunt_mult /= 2; /* reduce by 50% */ - if (monc->hunt_mult < 1) - monc->hunt_mult = 1; + un_backoff(monc); + __schedule_delayed(monc); } } diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index ea2a6c9fb7ce..d2667e5dddc3 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -157,10 +157,12 @@ static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data, #endif /* CONFIG_BLOCK */ static void ceph_osd_data_bvecs_init(struct ceph_osd_data *osd_data, - struct ceph_bvec_iter *bvec_pos) + struct ceph_bvec_iter *bvec_pos, + u32 num_bvecs) { osd_data->type = CEPH_OSD_DATA_TYPE_BVECS; osd_data->bvec_pos = *bvec_pos; + osd_data->num_bvecs = num_bvecs; } #define osd_req_op_data(oreq, whch, typ, fld) \ @@ -237,6 +239,22 @@ void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req, EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio); #endif /* CONFIG_BLOCK */ +void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req, + unsigned int which, + struct bio_vec *bvecs, u32 num_bvecs, + u32 bytes) +{ + struct ceph_osd_data *osd_data; + struct ceph_bvec_iter it = { + .bvecs = bvecs, + .iter = { .bi_size = bytes }, + }; + + osd_data = osd_req_op_data(osd_req, which, extent, osd_data); + ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs); +} +EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvecs); + void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req, unsigned int which, struct ceph_bvec_iter *bvec_pos) @@ -244,7 +262,7 @@ void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req, struct ceph_osd_data *osd_data; osd_data = osd_req_op_data(osd_req, which, extent, osd_data); - ceph_osd_data_bvecs_init(osd_data, bvec_pos); + ceph_osd_data_bvecs_init(osd_data, bvec_pos, 0); } EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvec_pos); @@ -287,7 +305,8 @@ EXPORT_SYMBOL(osd_req_op_cls_request_data_pages); void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req, unsigned int which, - struct bio_vec *bvecs, u32 bytes) + struct bio_vec *bvecs, u32 num_bvecs, + u32 bytes) { struct ceph_osd_data *osd_data; struct ceph_bvec_iter it = { @@ -296,7 +315,7 @@ void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req, }; osd_data = osd_req_op_data(osd_req, which, cls, request_data); - ceph_osd_data_bvecs_init(osd_data, &it); + ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs); osd_req->r_ops[which].cls.indata_len += bytes; osd_req->r_ops[which].indata_len += bytes; } diff --git a/net/compat.c b/net/compat.c index 5ae7437d3853..7242cce5631b 100644 --- a/net/compat.c +++ b/net/compat.c @@ -377,7 +377,8 
@@ static int compat_sock_setsockopt(struct socket *sock, int level, int optname, optname == SO_ATTACH_REUSEPORT_CBPF) return do_set_attach_filter(sock, level, optname, optval, optlen); - if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO) + if (!COMPAT_USE_64BIT_TIME && + (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)) return do_set_sock_timeout(sock, level, optname, optval, optlen); return sock_setsockopt(sock, level, optname, optval, optlen); @@ -448,7 +449,8 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname, static int compat_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { - if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO) + if (!COMPAT_USE_64BIT_TIME && + (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)) return do_get_sock_timeout(sock, level, optname, optval, optlen); return sock_getsockopt(sock, level, optname, optval, optlen); } diff --git a/net/core/dev.c b/net/core/dev.c index 969462ebb296..af0558b00c6c 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2969,7 +2969,7 @@ netdev_features_t passthru_features_check(struct sk_buff *skb, } EXPORT_SYMBOL(passthru_features_check); -static netdev_features_t dflt_features_check(const struct sk_buff *skb, +static netdev_features_t dflt_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) { diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index e3e6a3e2ca22..d884d8f5f0e5 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c @@ -839,7 +839,7 @@ void dev_mc_flush(struct net_device *dev) EXPORT_SYMBOL(dev_mc_flush); /** - * dev_mc_flush - Init multicast address list + * dev_mc_init - Init multicast address list * @dev: device * * Init multicast address list. diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 03416e6dd5d7..ba02f0dfe85c 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -1032,6 +1032,11 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, info_size = sizeof(info); if (copy_from_user(&info, useraddr, info_size)) return -EFAULT; + /* Since malicious users may modify the original data, + * we need to check whether FLOW_RSS is still requested. 
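The ethtool_get_rxnfc() hunk above re-checks FLOW_RSS after copying the structure from user space a second time, because user space can change the buffer between the two copies (a classic double fetch). Here is a toy userspace model of that pattern; the structures, the flag and the helper names are made up for the example and only stand in for the ethtool ones.

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define FLAG_RSS 0x1u

    struct req_hdr  { uint32_t cmd; uint32_t flags; };
    struct req_full { struct req_hdr hdr; uint32_t extra[8]; };

    /* Stand-in for copy_from_user(): may observe concurrent changes. */
    static int copy_from_caller(void *dst, const void *src, size_t len)
    {
        memcpy(dst, src, len);
        return 0;
    }

    static int handle(const void *user_buf)
    {
        struct req_hdr hdr;
        struct req_full full;

        if (copy_from_caller(&hdr, user_buf, sizeof(hdr)))
            return -1;

        if (hdr.flags & FLAG_RSS) {
            /* Second, larger fetch: the caller may have raced with us. */
            if (copy_from_caller(&full, user_buf, sizeof(full)))
                return -1;
            /* Re-validate the decision taken on the first fetch. */
            if (!(full.hdr.flags & FLAG_RSS))
                return -1;
            /* ... use full.extra[] knowing FLAG_RSS is really set ... */
        }
        return 0;
    }

    int main(void)
    {
        struct req_full buf = { .hdr = { .cmd = 1, .flags = FLAG_RSS } };

        printf("%d\n", handle(&buf));
        return 0;
    }

Any decision based on the first fetch has to be re-checked (or the first copy reused) once the full structure has been fetched.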
+ */ + if (!(info.flow_type & FLOW_RSS)) + return -EINVAL; } if (info.cmd == ETHTOOL_GRXCLSRLALL) { diff --git a/net/core/filter.c b/net/core/filter.c index d31aff93270d..e77c30ca491d 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3240,6 +3240,7 @@ BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb, skb_dst_set(skb, (struct dst_entry *) md); info = &md->u.tun_info; + memset(info, 0, sizeof(*info)); info->mode = IP_TUNNEL_INFO_TX; info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE; diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 7b7a14abba28..ce519861be59 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -55,7 +55,8 @@ static void neigh_timer_handler(struct timer_list *t); static void __neigh_notify(struct neighbour *n, int type, int flags, u32 pid); static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid); -static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev); +static int pneigh_ifdown_and_unlock(struct neigh_table *tbl, + struct net_device *dev); #ifdef CONFIG_PROC_FS static const struct file_operations neigh_stat_seq_fops; @@ -291,8 +292,7 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev) { write_lock_bh(&tbl->lock); neigh_flush_dev(tbl, dev); - pneigh_ifdown(tbl, dev); - write_unlock_bh(&tbl->lock); + pneigh_ifdown_and_unlock(tbl, dev); del_timer_sync(&tbl->proxy_timer); pneigh_queue_purge(&tbl->proxy_queue); @@ -681,9 +681,10 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey, return -ENOENT; } -static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev) +static int pneigh_ifdown_and_unlock(struct neigh_table *tbl, + struct net_device *dev) { - struct pneigh_entry *n, **np; + struct pneigh_entry *n, **np, *freelist = NULL; u32 h; for (h = 0; h <= PNEIGH_HASHMASK; h++) { @@ -691,16 +692,23 @@ static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev) while ((n = *np) != NULL) { if (!dev || n->dev == dev) { *np = n->next; - if (tbl->pdestructor) - tbl->pdestructor(n); - if (n->dev) - dev_put(n->dev); - kfree(n); + n->next = freelist; + freelist = n; continue; } np = &n->next; } } + write_unlock_bh(&tbl->lock); + while ((n = freelist)) { + freelist = n->next; + n->next = NULL; + if (tbl->pdestructor) + tbl->pdestructor(n); + if (n->dev) + dev_put(n->dev); + kfree(n); + } return -ENOENT; } @@ -2323,12 +2331,16 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL, NULL); if (!err) { - if (tb[NDA_IFINDEX]) + if (tb[NDA_IFINDEX]) { + if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32)) + return -EINVAL; filter_idx = nla_get_u32(tb[NDA_IFINDEX]); - - if (tb[NDA_MASTER]) + } + if (tb[NDA_MASTER]) { + if (nla_len(tb[NDA_MASTER]) != sizeof(u32)) + return -EINVAL; filter_master_idx = nla_get_u32(tb[NDA_MASTER]); - + } if (filter_idx || filter_master_idx) flags |= NLM_F_DUMP_FILTERED; } diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c index 92d016e87816..385f153fe031 100644 --- a/net/dccp/ccids/ccid2.c +++ b/net/dccp/ccids/ccid2.c @@ -126,6 +126,16 @@ static void ccid2_change_l_seq_window(struct sock *sk, u64 val) DCCPF_SEQ_WMAX)); } +static void dccp_tasklet_schedule(struct sock *sk) +{ + struct tasklet_struct *t = &dccp_sk(sk)->dccps_xmitlet; + + if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { + sock_hold(sk); + __tasklet_schedule(t); + } +} + static void ccid2_hc_tx_rto_expire(struct timer_list *t) { struct ccid2_hc_tx_sock *hc 
= from_timer(hc, t, tx_rtotimer); @@ -166,7 +176,7 @@ static void ccid2_hc_tx_rto_expire(struct timer_list *t) /* if we were blocked before, we may now send cwnd=1 packet */ if (sender_was_blocked) - tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet); + dccp_tasklet_schedule(sk); /* restart backed-off timer */ sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto); out: @@ -706,7 +716,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) done: /* check if incoming Acks allow pending packets to be sent */ if (sender_was_blocked && !ccid2_cwnd_network_limited(hc)) - tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet); + dccp_tasklet_schedule(sk); dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks); } diff --git a/net/dccp/timer.c b/net/dccp/timer.c index b50a8732ff43..1501a20a94ca 100644 --- a/net/dccp/timer.c +++ b/net/dccp/timer.c @@ -232,6 +232,7 @@ static void dccp_write_xmitlet(unsigned long data) else dccp_write_xmit(sk); bh_unlock_sock(sk); + sock_put(sk); } static void dccp_write_xmit_timer(struct timer_list *t) @@ -240,7 +241,6 @@ static void dccp_write_xmit_timer(struct timer_list *t) struct sock *sk = &dp->dccps_inet_connection.icsk_inet.sk; dccp_write_xmitlet((unsigned long)sk); - sock_put(sk); } void dccp_init_xmit_timers(struct sock *sk) diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c index 8396705deffc..40c851693f77 100644 --- a/net/dns_resolver/dns_key.c +++ b/net/dns_resolver/dns_key.c @@ -91,9 +91,9 @@ dns_resolver_preparse(struct key_preparsed_payload *prep) next_opt = memchr(opt, '#', end - opt) ?: end; opt_len = next_opt - opt; - if (!opt_len) { - printk(KERN_WARNING - "Empty option to dns_resolver key\n"); + if (opt_len <= 0 || opt_len > 128) { + pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n", + opt_len); return -EINVAL; } @@ -127,10 +127,8 @@ dns_resolver_preparse(struct key_preparsed_payload *prep) } bad_option_value: - printk(KERN_WARNING - "Option '%*.*s' to dns_resolver key:" - " bad/missing value\n", - opt_nlen, opt_nlen, opt); + pr_warn_ratelimited("Option '%*.*s' to dns_resolver key: bad/missing value\n", + opt_nlen, opt_nlen, opt); return -EINVAL; } while (opt = next_opt + 1, opt < end); } diff --git a/net/ieee802154/6lowpan/6lowpan_i.h b/net/ieee802154/6lowpan/6lowpan_i.h index b8d95cb71c25..44a7e16bf3b5 100644 --- a/net/ieee802154/6lowpan/6lowpan_i.h +++ b/net/ieee802154/6lowpan/6lowpan_i.h @@ -20,8 +20,8 @@ typedef unsigned __bitwise lowpan_rx_result; struct frag_lowpan_compare_key { u16 tag; u16 d_size; - const struct ieee802154_addr src; - const struct ieee802154_addr dst; + struct ieee802154_addr src; + struct ieee802154_addr dst; }; /* Equivalent of ipv4 struct ipq diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c index 1790b65944b3..2cc224106b69 100644 --- a/net/ieee802154/6lowpan/reassembly.c +++ b/net/ieee802154/6lowpan/reassembly.c @@ -75,14 +75,14 @@ fq_find(struct net *net, const struct lowpan_802154_cb *cb, { struct netns_ieee802154_lowpan *ieee802154_lowpan = net_ieee802154_lowpan(net); - struct frag_lowpan_compare_key key = { - .tag = cb->d_tag, - .d_size = cb->d_size, - .src = *src, - .dst = *dst, - }; + struct frag_lowpan_compare_key key = {}; struct inet_frag_queue *q; + key.tag = cb->d_tag; + key.d_size = cb->d_size; + key.src = *src; + key.dst = *dst; + q = inet_frag_find(&ieee802154_lowpan->frags, &key); if (!q) return NULL; @@ -372,7 +372,7 @@ int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type) struct lowpan_frag_queue *fq; struct net *net = 
dev_net(skb->dev); struct lowpan_802154_cb *cb = lowpan_802154_cb(skb); - struct ieee802154_hdr hdr; + struct ieee802154_hdr hdr = {}; int err; if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0) diff --git a/net/ife/ife.c b/net/ife/ife.c index 7d1ec76e7f43..13bbf8cb6a39 100644 --- a/net/ife/ife.c +++ b/net/ife/ife.c @@ -69,6 +69,9 @@ void *ife_decode(struct sk_buff *skb, u16 *metalen) int total_pull; u16 ifehdrln; + if (!pskb_may_pull(skb, skb->dev->hard_header_len + IFE_METAHDRLEN)) + return NULL; + ifehdr = (struct ifeheadr *) (skb->data + skb->dev->hard_header_len); ifehdrln = ntohs(ifehdr->metalen); total_pull = skb->dev->hard_header_len + ifehdrln; @@ -92,12 +95,43 @@ struct meta_tlvhdr { __be16 len; }; +static bool __ife_tlv_meta_valid(const unsigned char *skbdata, + const unsigned char *ifehdr_end) +{ + const struct meta_tlvhdr *tlv; + u16 tlvlen; + + if (unlikely(skbdata + sizeof(*tlv) > ifehdr_end)) + return false; + + tlv = (const struct meta_tlvhdr *)skbdata; + tlvlen = ntohs(tlv->len); + + /* tlv length field is inc header, check on minimum */ + if (tlvlen < NLA_HDRLEN) + return false; + + /* overflow by NLA_ALIGN check */ + if (NLA_ALIGN(tlvlen) < tlvlen) + return false; + + if (unlikely(skbdata + NLA_ALIGN(tlvlen) > ifehdr_end)) + return false; + + return true; +} + /* Caller takes care of presenting data in network order */ -void *ife_tlv_meta_decode(void *skbdata, u16 *attrtype, u16 *dlen, u16 *totlen) +void *ife_tlv_meta_decode(void *skbdata, const void *ifehdr_end, u16 *attrtype, + u16 *dlen, u16 *totlen) { - struct meta_tlvhdr *tlv = (struct meta_tlvhdr *) skbdata; + struct meta_tlvhdr *tlv; + + if (!__ife_tlv_meta_valid(skbdata, ifehdr_end)) + return NULL; + tlv = (struct meta_tlvhdr *)skbdata; *dlen = ntohs(tlv->len) - NLA_HDRLEN; *attrtype = ntohs(tlv->type); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 4c11b810a447..83c73bab2c3d 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1109,6 +1109,10 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork, struct ip_options_rcu *opt; struct rtable *rt; + rt = *rtp; + if (unlikely(!rt)) + return -EFAULT; + /* * setup for corking. 
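For the net/ife hunk above: __ife_tlv_meta_valid() refuses to parse a TLV unless the header itself fits in the remaining buffer, the length covers at least the header, aligning the length up cannot wrap, and the aligned TLV ends inside the buffer. Below is a self-contained sketch of the same sequence of checks over a plain byte buffer; the header layout and the 4-byte alignment are illustrative, not the IFE wire format.

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>   /* htons()/ntohs() */

    struct tlv_hdr { uint16_t type; uint16_t len; };   /* len includes the header */

    #define TLV_ALIGN(x) (((x) + 3u) & ~3u)

    static int tlv_valid(const unsigned char *p, const unsigned char *end)
    {
        struct tlv_hdr tlv;
        uint16_t len;
        size_t avail = (size_t)(end - p);

        if (avail < sizeof(tlv))            /* header must fit */
            return 0;
        memcpy(&tlv, p, sizeof(tlv));
        len = ntohs(tlv.len);

        if (len < sizeof(tlv))              /* must at least cover the header */
            return 0;
        if (TLV_ALIGN(len) < len)           /* defensive: padding must not wrap */
            return 0;
        if (avail < TLV_ALIGN(len))         /* padded TLV must fit in the buffer */
            return 0;
        return 1;
    }

    int main(void)
    {
        unsigned char buf[16] = { 0 };
        struct tlv_hdr h = { .type = htons(1), .len = htons(6) };

        memcpy(buf, &h, sizeof(h));
        printf("valid=%d\n", tlv_valid(buf, buf + sizeof(buf)));   /* 1 */

        h.len = htons(64);                  /* claims more data than we have */
        memcpy(buf, &h, sizeof(h));
        printf("valid=%d\n", tlv_valid(buf, buf + sizeof(buf)));   /* 0 */
        return 0;
    }

Only after all of these checks does it make sense to compute the payload length (len minus the header) and hand the data to the caller, which is what the decode path in the hunk does.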
*/ @@ -1124,9 +1128,7 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork, cork->flags |= IPCORK_OPT; cork->addr = ipc->addr; } - rt = *rtp; - if (unlikely(!rt)) - return -EFAULT; + /* * We steal reference to this route, caller should not release it */ diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 05e47d777009..56a010622f70 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -775,8 +775,10 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) ipc.addr = faddr = daddr; if (ipc.opt && ipc.opt->opt.srr) { - if (!daddr) - return -EINVAL; + if (!daddr) { + err = -EINVAL; + goto out_free; + } faddr = ipc.opt->opt.faddr; } tos = get_rttos(&ipc, inet); @@ -842,6 +844,7 @@ back_from_confirm: out: ip_rt_put(rt); +out_free: if (free) kfree(ipc.opt); if (!err) { diff --git a/net/ipv4/route.c b/net/ipv4/route.c index ccb25d80f679..29268efad247 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -709,7 +709,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, fnhe->fnhe_gw = gw; fnhe->fnhe_pmtu = pmtu; fnhe->fnhe_mtu_locked = lock; - fnhe->fnhe_expires = expires; + fnhe->fnhe_expires = max(1UL, expires); /* Exception created; mark the cached routes for the nexthop * stale, so anyone caching it rechecks if this exception @@ -1297,6 +1297,36 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst) return mtu - lwtunnel_headroom(dst->lwtstate, mtu); } +static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr) +{ + struct fnhe_hash_bucket *hash; + struct fib_nh_exception *fnhe, __rcu **fnhe_p; + u32 hval = fnhe_hashfun(daddr); + + spin_lock_bh(&fnhe_lock); + + hash = rcu_dereference_protected(nh->nh_exceptions, + lockdep_is_held(&fnhe_lock)); + hash += hval; + + fnhe_p = &hash->chain; + fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock)); + while (fnhe) { + if (fnhe->fnhe_daddr == daddr) { + rcu_assign_pointer(*fnhe_p, rcu_dereference_protected( + fnhe->fnhe_next, lockdep_is_held(&fnhe_lock))); + fnhe_flush_routes(fnhe); + kfree_rcu(fnhe, rcu); + break; + } + fnhe_p = &fnhe->fnhe_next; + fnhe = rcu_dereference_protected(fnhe->fnhe_next, + lockdep_is_held(&fnhe_lock)); + } + + spin_unlock_bh(&fnhe_lock); +} + static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr) { struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions); @@ -1310,8 +1340,14 @@ static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr) for (fnhe = rcu_dereference(hash[hval].chain); fnhe; fnhe = rcu_dereference(fnhe->fnhe_next)) { - if (fnhe->fnhe_daddr == daddr) + if (fnhe->fnhe_daddr == daddr) { + if (fnhe->fnhe_expires && + time_after(jiffies, fnhe->fnhe_expires)) { + ip_del_fnhe(nh, daddr); + break; + } return fnhe; + } } return NULL; } @@ -1339,6 +1375,7 @@ static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe, fnhe->fnhe_gw = 0; fnhe->fnhe_pmtu = 0; fnhe->fnhe_expires = 0; + fnhe->fnhe_mtu_locked = false; fnhe_flush_routes(fnhe); orig = NULL; } @@ -1636,36 +1673,6 @@ static void ip_handle_martian_source(struct net_device *dev, #endif } -static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr) -{ - struct fnhe_hash_bucket *hash; - struct fib_nh_exception *fnhe, __rcu **fnhe_p; - u32 hval = fnhe_hashfun(daddr); - - spin_lock_bh(&fnhe_lock); - - hash = rcu_dereference_protected(nh->nh_exceptions, - lockdep_is_held(&fnhe_lock)); - hash += hval; - - fnhe_p = &hash->chain; - fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock)); - while 
(fnhe) { - if (fnhe->fnhe_daddr == daddr) { - rcu_assign_pointer(*fnhe_p, rcu_dereference_protected( - fnhe->fnhe_next, lockdep_is_held(&fnhe_lock))); - fnhe_flush_routes(fnhe); - kfree_rcu(fnhe, rcu); - break; - } - fnhe_p = &fnhe->fnhe_next; - fnhe = rcu_dereference_protected(fnhe->fnhe_next, - lockdep_is_held(&fnhe_lock)); - } - - spin_unlock_bh(&fnhe_lock); -} - /* called in rcu_read_lock() section */ static int __mkroute_input(struct sk_buff *skb, const struct fib_result *res, @@ -1719,20 +1726,10 @@ static int __mkroute_input(struct sk_buff *skb, fnhe = find_exception(&FIB_RES_NH(*res), daddr); if (do_cache) { - if (fnhe) { + if (fnhe) rth = rcu_dereference(fnhe->fnhe_rth_input); - if (rth && rth->dst.expires && - time_after(jiffies, rth->dst.expires)) { - ip_del_fnhe(&FIB_RES_NH(*res), daddr); - fnhe = NULL; - } else { - goto rt_cache; - } - } - - rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); - -rt_cache: + else + rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); if (rt_cache_valid(rth)) { skb_dst_set_noref(skb, &rth->dst); goto out; @@ -2216,39 +2213,31 @@ static struct rtable *__mkroute_output(const struct fib_result *res, * the loopback interface and the IP_PKTINFO ipi_ifindex will * be set to the loopback interface as well. */ - fi = NULL; + do_cache = false; } fnhe = NULL; do_cache &= fi != NULL; - if (do_cache) { + if (fi) { struct rtable __rcu **prth; struct fib_nh *nh = &FIB_RES_NH(*res); fnhe = find_exception(nh, fl4->daddr); + if (!do_cache) + goto add; if (fnhe) { prth = &fnhe->fnhe_rth_output; - rth = rcu_dereference(*prth); - if (rth && rth->dst.expires && - time_after(jiffies, rth->dst.expires)) { - ip_del_fnhe(nh, fl4->daddr); - fnhe = NULL; - } else { - goto rt_cache; + } else { + if (unlikely(fl4->flowi4_flags & + FLOWI_FLAG_KNOWN_NH && + !(nh->nh_gw && + nh->nh_scope == RT_SCOPE_LINK))) { + do_cache = false; + goto add; } + prth = raw_cpu_ptr(nh->nh_pcpu_rth_output); } - - if (unlikely(fl4->flowi4_flags & - FLOWI_FLAG_KNOWN_NH && - !(nh->nh_gw && - nh->nh_scope == RT_SCOPE_LINK))) { - do_cache = false; - goto add; - } - prth = raw_cpu_ptr(nh->nh_pcpu_rth_output); rth = rcu_dereference(*prth); - -rt_cache: if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst)) return rth; } diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index bccc4c270087..c9d00ef54dec 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -697,7 +697,7 @@ static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb, { return skb->len < size_goal && sock_net(sk)->ipv4.sysctl_tcp_autocorking && - skb != tcp_write_queue_head(sk) && + !tcp_rtx_queue_empty(sk) && refcount_read(&sk->sk_wmem_alloc) > skb->truesize; } @@ -1204,7 +1204,8 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) uarg->zerocopy = 0; } - if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect)) { + if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) && + !tp->repair) { err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size); if (err == -EINPROGRESS && copied_syn > 0) goto out; @@ -2368,6 +2369,7 @@ void tcp_write_queue_purge(struct sock *sk) INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue); sk_mem_reclaim(sk); tcp_clear_all_retrans_hints(tcp_sk(sk)); + tcp_sk(sk)->packets_out = 0; } int tcp_disconnect(struct sock *sk, int flags) @@ -2417,7 +2419,6 @@ int tcp_disconnect(struct sock *sk, int flags) icsk->icsk_backoff = 0; tp->snd_cwnd = 2; icsk->icsk_probes_out = 0; - tp->packets_out = 0; tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; tp->snd_cwnd_cnt = 0; tp->window_clamp = 0; @@ 
-2673,7 +2674,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, case TCP_REPAIR_QUEUE: if (!tp->repair) err = -EPERM; - else if (val < TCP_QUEUES_NR) + else if ((unsigned int)val < TCP_QUEUES_NR) tp->repair_queue = val; else err = -EINVAL; @@ -2813,8 +2814,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level, #ifdef CONFIG_TCP_MD5SIG case TCP_MD5SIG: case TCP_MD5SIG_EXT: - /* Read the IP->Key mappings from userspace */ - err = tp->af_specific->md5_parse(sk, optname, optval, optlen); + if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) + err = tp->af_specific->md5_parse(sk, optname, optval, optlen); + else + err = -EINVAL; break; #endif case TCP_USER_TIMEOUT: diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index 158d105e76da..58e2f479ffb4 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -806,7 +806,9 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs) } } } - bbr->idle_restart = 0; + /* Restart after idle ends only once we process a new S/ACK for data */ + if (rs->delivered > 0) + bbr->idle_restart = 0; } static void bbr_update_model(struct sock *sk, const struct rate_sample *rs) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 367def6ddeda..e51c644484dc 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3868,11 +3868,8 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th) int length = (th->doff << 2) - sizeof(*th); const u8 *ptr = (const u8 *)(th + 1); - /* If the TCP option is too short, we can short cut */ - if (length < TCPOLEN_MD5SIG) - return NULL; - - while (length > 0) { + /* If not enough data remaining, we can short cut */ + while (length >= TCPOLEN_MD5SIG) { int opcode = *ptr++; int opsize; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 24b5c59b1c53..b61a770884fa 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -401,9 +401,9 @@ static int compute_score(struct sock *sk, struct net *net, bool dev_match = (sk->sk_bound_dev_if == dif || sk->sk_bound_dev_if == sdif); - if (exact_dif && !dev_match) + if (!dev_match) return -1; - if (sk->sk_bound_dev_if && dev_match) + if (sk->sk_bound_dev_if) score += 4; } @@ -952,8 +952,10 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags); if (ipc.opt && ipc.opt->opt.srr) { - if (!daddr) - return -EINVAL; + if (!daddr) { + err = -EINVAL; + goto out_free; + } faddr = ipc.opt->opt.faddr; connected = 0; } @@ -1074,6 +1076,7 @@ do_append_data: out: ip_rt_put(rt); +out_free: if (free) kfree(ipc.opt); if (!err) diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index 6794ddf0547c..11e4e80cf7e9 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -34,16 +34,15 @@ config IPV6_ROUTE_INFO bool "IPv6: Route Information (RFC 4191) support" depends on IPV6_ROUTER_PREF ---help--- - This is experimental support of Route Information. + Support of Route Information. If unsure, say N. config IPV6_OPTIMISTIC_DAD bool "IPv6: Enable RFC 4429 Optimistic DAD" ---help--- - This is experimental support for optimistic Duplicate - Address Detection. It allows for autoconfigured addresses - to be used more quickly. + Support for optimistic Duplicate Address Detection. It allows for + autoconfigured addresses to be used more quickly. If unsure, say N. @@ -280,7 +279,7 @@ config IPV6_MROUTE depends on IPV6 select IP_MROUTE_COMMON ---help--- - Experimental support for IPv6 multicast forwarding. + Support for IPv6 multicast forwarding. If unsure, say N. 
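A small note on the do_tcp_setsockopt() hunk above: the TCP_REPAIR_QUEUE bound check becomes (unsigned int)val < TCP_QUEUES_NR, which rejects both negative and too-large values with a single comparison, since a negative int wraps to a huge unsigned number. A minimal illustration of that idiom, with an invented bound and function name standing in for the kernel ones:

    #include <stdio.h>

    #define QUEUES_NR 3   /* illustrative bound; stands in for TCP_QUEUES_NR */

    /* Accept only 0 <= val < QUEUES_NR: casting a negative val to
     * unsigned yields a huge value, so it fails the same comparison. */
    static int set_repair_queue(int val)
    {
        if ((unsigned int)val >= QUEUES_NR)
            return -1;              /* the kernel hunk returns -EINVAL here */
        return val;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               set_repair_queue(1),    /*  1 */
               set_repair_queue(-1),   /* -1: rejected */
               set_repair_queue(7));   /* -1: rejected */
        return 0;
    }

The older signed comparison let negative values through, which is exactly what the one-character cast closes off.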
config IPV6_MROUTE_MULTIPLE_TABLES diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index c214ffec02f0..ca957dd93a29 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -669,7 +669,7 @@ static void vti6_link_config(struct ip6_tnl *t, bool keep_mtu) else mtu = ETH_DATA_LEN - LL_MAX_HEADER - sizeof(struct ipv6hdr); - dev->mtu = max_t(int, mtu, IPV6_MIN_MTU); + dev->mtu = max_t(int, mtu, IPV4_MIN_MTU); } /** @@ -881,7 +881,7 @@ static void vti6_dev_setup(struct net_device *dev) dev->priv_destructor = vti6_dev_free; dev->type = ARPHRD_TUNNEL6; - dev->min_mtu = IPV6_MIN_MTU; + dev->min_mtu = IPV4_MIN_MTU; dev->max_mtu = IP_MAX_MTU - sizeof(struct ipv6hdr); dev->flags |= IFF_NOARP; dev->addr_len = sizeof(struct in6_addr); diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig index ccbfa83e4bb0..ce77bcc2490c 100644 --- a/net/ipv6/netfilter/Kconfig +++ b/net/ipv6/netfilter/Kconfig @@ -48,6 +48,34 @@ config NFT_CHAIN_ROUTE_IPV6 fields such as the source, destination, flowlabel, hop-limit and the packet mark. +if NF_NAT_IPV6 + +config NFT_CHAIN_NAT_IPV6 + tristate "IPv6 nf_tables nat chain support" + help + This option enables the "nat" chain for IPv6 in nf_tables. This + chain type is used to perform Network Address Translation (NAT) + packet transformations such as the source, destination address and + source and destination ports. + +config NFT_MASQ_IPV6 + tristate "IPv6 masquerade support for nf_tables" + depends on NFT_MASQ + select NF_NAT_MASQUERADE_IPV6 + help + This is the expression that provides IPv4 masquerading support for + nf_tables. + +config NFT_REDIR_IPV6 + tristate "IPv6 redirect support for nf_tables" + depends on NFT_REDIR + select NF_NAT_REDIRECT + help + This is the expression that provides IPv4 redirect support for + nf_tables. + +endif # NF_NAT_IPV6 + config NFT_REJECT_IPV6 select NF_REJECT_IPV6 default NFT_REJECT @@ -107,39 +135,12 @@ config NF_NAT_IPV6 if NF_NAT_IPV6 -config NFT_CHAIN_NAT_IPV6 - depends on NF_TABLES_IPV6 - tristate "IPv6 nf_tables nat chain support" - help - This option enables the "nat" chain for IPv6 in nf_tables. This - chain type is used to perform Network Address Translation (NAT) - packet transformations such as the source, destination address and - source and destination ports. - config NF_NAT_MASQUERADE_IPV6 tristate "IPv6 masquerade support" help This is the kernel functionality to provide NAT in the masquerade flavour (automatic source address selection) for IPv6. -config NFT_MASQ_IPV6 - tristate "IPv6 masquerade support for nf_tables" - depends on NF_TABLES_IPV6 - depends on NFT_MASQ - select NF_NAT_MASQUERADE_IPV6 - help - This is the expression that provides IPv4 masquerading support for - nf_tables. - -config NFT_REDIR_IPV6 - tristate "IPv6 redirect support for nf_tables" - depends on NF_TABLES_IPV6 - depends on NFT_REDIR - select NF_NAT_REDIRECT - help - This is the expression that provides IPv4 redirect support for - nf_tables. 
- endif # NF_NAT_IPV6 config IP6_NF_IPTABLES diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 49b954d6d0fa..f4d61736c41a 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1835,11 +1835,16 @@ static void ip6_multipath_l3_keys(const struct sk_buff *skb, const struct ipv6hdr *inner_iph; const struct icmp6hdr *icmph; struct ipv6hdr _inner_iph; + struct icmp6hdr _icmph; if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6)) goto out; - icmph = icmp6_hdr(skb); + icmph = skb_header_pointer(skb, skb_transport_offset(skb), + sizeof(_icmph), &_icmph); + if (!icmph) + goto out; + if (icmph->icmp6_type != ICMPV6_DEST_UNREACH && icmph->icmp6_type != ICMPV6_PKT_TOOBIG && icmph->icmp6_type != ICMPV6_TIME_EXCEED && @@ -3975,6 +3980,7 @@ void rt6_mtu_change(struct net_device *dev, unsigned int mtu) static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = { [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) }, + [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) }, [RTA_OIF] = { .type = NLA_U32 }, [RTA_IIF] = { .type = NLA_U32 }, [RTA_PRIORITY] = { .type = NLA_U32 }, @@ -3986,6 +3992,7 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = { [RTA_EXPIRES] = { .type = NLA_U32 }, [RTA_UID] = { .type = NLA_U32 }, [RTA_MARK] = { .type = NLA_U32 }, + [RTA_TABLE] = { .type = NLA_U32 }, }; static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index f343e6f0fc95..5fe139484919 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c @@ -136,7 +136,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto) isrh->nexthdr = proto; hdr->daddr = isrh->segments[isrh->first_segment]; - set_tun_src(net, ip6_dst_idev(dst)->dev, &hdr->daddr, &hdr->saddr); + set_tun_src(net, dst->dev, &hdr->daddr, &hdr->saddr); #ifdef CONFIG_IPV6_SEG6_HMAC if (sr_has_hmac(isrh)) { diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 4ec76a87aeb8..ea0730028e5d 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -148,9 +148,9 @@ static int compute_score(struct sock *sk, struct net *net, bool dev_match = (sk->sk_bound_dev_if == dif || sk->sk_bound_dev_if == sdif); - if (exact_dif && !dev_match) + if (!dev_match) return -1; - if (sk->sk_bound_dev_if && dev_match) + if (sk->sk_bound_dev_if) score++; } diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c index f85f0d7480ac..4a46df8441c9 100644 --- a/net/ipv6/xfrm6_tunnel.c +++ b/net/ipv6/xfrm6_tunnel.c @@ -341,6 +341,9 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net) struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); unsigned int i; + xfrm_state_flush(net, IPSEC_PROTO_ANY, false); + xfrm_flush_gc(); + for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i])); diff --git a/net/key/af_key.c b/net/key/af_key.c index 7e2e7188e7f4..e62e52e8f141 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -437,6 +437,24 @@ static int verify_address_len(const void *p) return 0; } +static inline int sadb_key_len(const struct sadb_key *key) +{ + int key_bytes = DIV_ROUND_UP(key->sadb_key_bits, 8); + + return DIV_ROUND_UP(sizeof(struct sadb_key) + key_bytes, + sizeof(uint64_t)); +} + +static int verify_key_len(const void *p) +{ + const struct sadb_key *key = p; + + if (sadb_key_len(key) > key->sadb_key_len) + return -EINVAL; + + return 0; +} + static inline int pfkey_sec_ctx_len(const struct sadb_x_sec_ctx *sec_ctx) { return DIV_ROUND_UP(sizeof(struct sadb_x_sec_ctx) + @@ -533,16 +551,25 @@ static 
int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void * return -EINVAL; if (ext_hdrs[ext_type-1] != NULL) return -EINVAL; - if (ext_type == SADB_EXT_ADDRESS_SRC || - ext_type == SADB_EXT_ADDRESS_DST || - ext_type == SADB_EXT_ADDRESS_PROXY || - ext_type == SADB_X_EXT_NAT_T_OA) { + switch (ext_type) { + case SADB_EXT_ADDRESS_SRC: + case SADB_EXT_ADDRESS_DST: + case SADB_EXT_ADDRESS_PROXY: + case SADB_X_EXT_NAT_T_OA: if (verify_address_len(p)) return -EINVAL; - } - if (ext_type == SADB_X_EXT_SEC_CTX) { + break; + case SADB_X_EXT_SEC_CTX: if (verify_sec_ctx_len(p)) return -EINVAL; + break; + case SADB_EXT_KEY_AUTH: + case SADB_EXT_KEY_ENCRYPT: + if (verify_key_len(p)) + return -EINVAL; + break; + default: + break; } ext_hdrs[ext_type-1] = (void *) p; } @@ -1104,14 +1131,12 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, key = ext_hdrs[SADB_EXT_KEY_AUTH - 1]; if (key != NULL && sa->sadb_sa_auth != SADB_X_AALG_NULL && - ((key->sadb_key_bits+7) / 8 == 0 || - (key->sadb_key_bits+7) / 8 > key->sadb_key_len * sizeof(uint64_t))) + key->sadb_key_bits == 0) return ERR_PTR(-EINVAL); key = ext_hdrs[SADB_EXT_KEY_ENCRYPT-1]; if (key != NULL && sa->sadb_sa_encrypt != SADB_EALG_NULL && - ((key->sadb_key_bits+7) / 8 == 0 || - (key->sadb_key_bits+7) / 8 > key->sadb_key_len * sizeof(uint64_t))) + key->sadb_key_bits == 0) return ERR_PTR(-EINVAL); x = xfrm_state_alloc(net); diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 0fbd3ee26165..40261cb68e83 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -183,6 +183,26 @@ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id) } EXPORT_SYMBOL_GPL(l2tp_tunnel_get); +struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth) +{ + const struct l2tp_net *pn = l2tp_pernet(net); + struct l2tp_tunnel *tunnel; + int count = 0; + + rcu_read_lock_bh(); + list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { + if (++count > nth) { + l2tp_tunnel_inc_refcount(tunnel); + rcu_read_unlock_bh(); + return tunnel; + } + } + rcu_read_unlock_bh(); + + return NULL; +} +EXPORT_SYMBOL_GPL(l2tp_tunnel_get_nth); + /* Lookup a session. A new reference is held on the returned session. 
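The l2tp_core.c change above adds l2tp_tunnel_get_nth(), which takes a reference on the tunnel before leaving the RCU read section, so the returned pointer stays valid afterwards and every caller must drop the reference when done; the older l2tp_tunnel_find_nth(), removed in the following hunk, handed out the pointer without one. Here is a simplified single-threaded userspace sketch of that contract, using a plain mutex instead of RCU and invented names throughout.

    #include <stdio.h>
    #include <stdlib.h>
    #include <pthread.h>

    struct tunnel {
        int id;
        int refcount;            /* protected by list_lock in this sketch */
        struct tunnel *next;
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct tunnel *tunnels;

    static struct tunnel *tunnel_get_nth(int nth)
    {
        struct tunnel *t;
        int count = 0;

        pthread_mutex_lock(&list_lock);
        for (t = tunnels; t; t = t->next) {
            if (++count > nth) {
                t->refcount++;               /* pin before unlocking */
                pthread_mutex_unlock(&list_lock);
                return t;
            }
        }
        pthread_mutex_unlock(&list_lock);
        return NULL;
    }

    static void tunnel_put(struct tunnel *t)
    {
        int free_it;

        pthread_mutex_lock(&list_lock);
        free_it = (--t->refcount == 0);
        pthread_mutex_unlock(&list_lock);
        if (free_it)
            free(t);
    }

    int main(void)
    {
        struct tunnel *t = calloc(1, sizeof(*t));
        struct tunnel *got;

        if (!t)
            return 1;
        t->id = 1;
        t->refcount = 1;             /* the list's own reference */
        tunnels = t;

        got = tunnel_get_nth(0);
        if (got) {
            printf("got tunnel %d\n", got->id);
            tunnel_put(got);         /* caller drops its reference */
        }
        return 0;
    }

The old find-style interface was only safe while the caller stayed inside the locked walk; once the pointer is used afterwards, the reference has to travel with it, which is also why the netlink and procfs iterators in the following hunks gain matching l2tp_tunnel_dec_refcount() calls.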
*/ struct l2tp_session *l2tp_session_get(const struct net *net, struct l2tp_tunnel *tunnel, @@ -335,26 +355,6 @@ err_tlock: } EXPORT_SYMBOL_GPL(l2tp_session_register); -struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth) -{ - struct l2tp_net *pn = l2tp_pernet(net); - struct l2tp_tunnel *tunnel; - int count = 0; - - rcu_read_lock_bh(); - list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { - if (++count > nth) { - rcu_read_unlock_bh(); - return tunnel; - } - } - - rcu_read_unlock_bh(); - - return NULL; -} -EXPORT_SYMBOL_GPL(l2tp_tunnel_find_nth); - /***************************************************************************** * Receive data handling *****************************************************************************/ diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index ba33cbec71eb..c199020f8a8a 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -212,6 +212,8 @@ static inline void *l2tp_session_priv(struct l2tp_session *session) } struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id); +struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth); + void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); struct l2tp_session *l2tp_session_get(const struct net *net, @@ -220,7 +222,6 @@ struct l2tp_session *l2tp_session_get(const struct net *net, struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth); struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net, const char *ifname); -struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth); int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index 72e713da4733..7f1e842ef05a 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -47,7 +47,11 @@ struct l2tp_dfs_seq_data { static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd) { - pd->tunnel = l2tp_tunnel_find_nth(pd->net, pd->tunnel_idx); + /* Drop reference taken during previous invocation */ + if (pd->tunnel) + l2tp_tunnel_dec_refcount(pd->tunnel); + + pd->tunnel = l2tp_tunnel_get_nth(pd->net, pd->tunnel_idx); pd->tunnel_idx++; } @@ -96,7 +100,17 @@ static void *l2tp_dfs_seq_next(struct seq_file *m, void *v, loff_t *pos) static void l2tp_dfs_seq_stop(struct seq_file *p, void *v) { - /* nothing to do */ + struct l2tp_dfs_seq_data *pd = v; + + if (!pd || pd == SEQ_START_TOKEN) + return; + + /* Drop reference taken by last invocation of l2tp_dfs_next_tunnel() */ + if (pd->tunnel) { + l2tp_tunnel_dec_refcount(pd->tunnel); + pd->tunnel = NULL; + pd->session = NULL; + } } static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v) diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index b05dbd9ffcb2..6616c9fd292f 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -487,14 +487,17 @@ static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback struct net *net = sock_net(skb->sk); for (;;) { - tunnel = l2tp_tunnel_find_nth(net, ti); + tunnel = l2tp_tunnel_get_nth(net, ti); if (tunnel == NULL) goto out; if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, - tunnel, L2TP_CMD_TUNNEL_GET) < 0) + tunnel, L2TP_CMD_TUNNEL_GET) < 0) { + l2tp_tunnel_dec_refcount(tunnel); goto out; + } + l2tp_tunnel_dec_refcount(tunnel); ti++; } @@ -848,7 +851,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback 
for (;;) { if (tunnel == NULL) { - tunnel = l2tp_tunnel_find_nth(net, ti); + tunnel = l2tp_tunnel_get_nth(net, ti); if (tunnel == NULL) goto out; } @@ -856,6 +859,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback session = l2tp_session_get_nth(tunnel, si); if (session == NULL) { ti++; + l2tp_tunnel_dec_refcount(tunnel); tunnel = NULL; si = 0; continue; @@ -865,6 +869,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback cb->nlh->nlmsg_seq, NLM_F_MULTI, session, L2TP_CMD_SESSION_GET) < 0) { l2tp_session_dec_refcount(session); + l2tp_tunnel_dec_refcount(tunnel); break; } l2tp_session_dec_refcount(session); diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 896bbca9bdaa..1fd9e145076a 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -619,6 +619,13 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, lock_sock(sk); error = -EINVAL; + + if (sockaddr_len != sizeof(struct sockaddr_pppol2tp) && + sockaddr_len != sizeof(struct sockaddr_pppol2tpv3) && + sockaddr_len != sizeof(struct sockaddr_pppol2tpin6) && + sockaddr_len != sizeof(struct sockaddr_pppol2tpv3in6)) + goto end; + if (sp->sa_protocol != PX_PROTO_OL2TP) goto end; @@ -1551,16 +1558,19 @@ struct pppol2tp_seq_data { static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd) { + /* Drop reference taken during previous invocation */ + if (pd->tunnel) + l2tp_tunnel_dec_refcount(pd->tunnel); + for (;;) { - pd->tunnel = l2tp_tunnel_find_nth(net, pd->tunnel_idx); + pd->tunnel = l2tp_tunnel_get_nth(net, pd->tunnel_idx); pd->tunnel_idx++; - if (pd->tunnel == NULL) - break; + /* Only accept L2TPv2 tunnels */ + if (!pd->tunnel || pd->tunnel->version == 2) + return; - /* Ignore L2TPv3 tunnels */ - if (pd->tunnel->version < 3) - break; + l2tp_tunnel_dec_refcount(pd->tunnel); } } @@ -1609,7 +1619,17 @@ static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos) static void pppol2tp_seq_stop(struct seq_file *p, void *v) { - /* nothing to do */ + struct pppol2tp_seq_data *pd = v; + + if (!pd || pd == SEQ_START_TOKEN) + return; + + /* Drop reference taken by last invocation of pppol2tp_next_tunnel() */ + if (pd->tunnel) { + l2tp_tunnel_dec_refcount(pd->tunnel); + pd->tunnel = NULL; + pd->session = NULL; + } } static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v) diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 01dcc0823d1f..1beeea9549fa 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -199,9 +199,19 @@ static int llc_ui_release(struct socket *sock) llc->laddr.lsap, llc->daddr.lsap); if (!llc_send_disc(sk)) llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo); - if (!sock_flag(sk, SOCK_ZAPPED)) + if (!sock_flag(sk, SOCK_ZAPPED)) { + struct llc_sap *sap = llc->sap; + + /* Hold this for release_sock(), so that llc_backlog_rcv() + * could still use it. 
+ */ + llc_sap_hold(sap); llc_sap_remove_socket(llc->sap, sk); - release_sock(sk); + release_sock(sk); + llc_sap_put(sap); + } else { + release_sock(sk); + } if (llc->dev) dev_put(llc->dev); sock_put(sk); @@ -920,6 +930,9 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) if (size > llc->dev->mtu) size = llc->dev->mtu; copied = size - hdrlen; + rc = -EINVAL; + if (copied < 0) + goto release; release_sock(sk); skb = sock_alloc_send_skb(sk, size, noblock, &rc); lock_sock(sk); diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c index 163121192aca..4d78375f9872 100644 --- a/net/llc/llc_c_ac.c +++ b/net/llc/llc_c_ac.c @@ -1099,14 +1099,7 @@ int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb) int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb) { - struct llc_sock *llc = llc_sk(sk); - - del_timer(&llc->pf_cycle_timer.timer); - del_timer(&llc->ack_timer.timer); - del_timer(&llc->rej_sent_timer.timer); - del_timer(&llc->busy_state_timer.timer); - llc->ack_must_be_send = 0; - llc->ack_pf = 0; + llc_sk_stop_all_timers(sk, false); return 0; } diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c index 110e32bcb399..c0ac522b48a1 100644 --- a/net/llc/llc_conn.c +++ b/net/llc/llc_conn.c @@ -961,6 +961,26 @@ out: return sk; } +void llc_sk_stop_all_timers(struct sock *sk, bool sync) +{ + struct llc_sock *llc = llc_sk(sk); + + if (sync) { + del_timer_sync(&llc->pf_cycle_timer.timer); + del_timer_sync(&llc->ack_timer.timer); + del_timer_sync(&llc->rej_sent_timer.timer); + del_timer_sync(&llc->busy_state_timer.timer); + } else { + del_timer(&llc->pf_cycle_timer.timer); + del_timer(&llc->ack_timer.timer); + del_timer(&llc->rej_sent_timer.timer); + del_timer(&llc->busy_state_timer.timer); + } + + llc->ack_must_be_send = 0; + llc->ack_pf = 0; +} + /** * llc_sk_free - Frees a LLC socket * @sk - socket to free @@ -973,7 +993,7 @@ void llc_sk_free(struct sock *sk) llc->state = LLC_CONN_OUT_OF_SVC; /* Stop all (possibly) running timers */ - llc_conn_ac_stop_all_timers(sk, NULL); + llc_sk_stop_all_timers(sk, true); #ifdef DEBUG_LLC_CONN_ALLOC printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __func__, skb_queue_len(&llc->pdu_unack_q), diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 595c662a61e8..ac4295296514 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -8,6 +8,7 @@ * Copyright 2007, Michael Wu <flamingice@sourmilk.net> * Copyright 2007-2010, Intel Corporation * Copyright(c) 2015-2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -970,6 +971,9 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, sta->ampdu_mlme.addba_req_num[tid] = 0; + tid_tx->timeout = + le16_to_cpu(mgmt->u.action.u.addba_resp.timeout); + if (tid_tx->timeout) { mod_timer(&tid_tx->session_timer, TU_TO_EXP_TIME(tid_tx->timeout)); diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 69449db7e283..233068756502 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -36,6 +36,7 @@ #define IEEE80211_AUTH_TIMEOUT (HZ / 5) #define IEEE80211_AUTH_TIMEOUT_LONG (HZ / 2) #define IEEE80211_AUTH_TIMEOUT_SHORT (HZ / 10) +#define IEEE80211_AUTH_TIMEOUT_SAE (HZ * 2) #define IEEE80211_AUTH_MAX_TRIES 3 #define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5) #define IEEE80211_ASSOC_TIMEOUT (HZ / 5) @@ -1787,7 +1788,7 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local, 
params[ac].acm = acm; params[ac].uapsd = uapsd; - if (params->cw_min == 0 || + if (params[ac].cw_min == 0 || params[ac].cw_min > params[ac].cw_max) { sdata_info(sdata, "AP has invalid WMM params (CWmin/max=%d/%d for ACI %d), using defaults\n", @@ -3814,16 +3815,19 @@ static int ieee80211_auth(struct ieee80211_sub_if_data *sdata) tx_flags); if (tx_flags == 0) { - auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; - auth_data->timeout_started = true; - run_again(sdata, auth_data->timeout); + if (auth_data->algorithm == WLAN_AUTH_SAE) + auth_data->timeout = jiffies + + IEEE80211_AUTH_TIMEOUT_SAE; + else + auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; } else { auth_data->timeout = round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG); - auth_data->timeout_started = true; - run_again(sdata, auth_data->timeout); } + auth_data->timeout_started = true; + run_again(sdata, auth_data->timeout); + return 0; } @@ -3894,8 +3898,15 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) ifmgd->status_received = false; if (ifmgd->auth_data && ieee80211_is_auth(fc)) { if (status_acked) { - ifmgd->auth_data->timeout = - jiffies + IEEE80211_AUTH_TIMEOUT_SHORT; + if (ifmgd->auth_data->algorithm == + WLAN_AUTH_SAE) + ifmgd->auth_data->timeout = + jiffies + + IEEE80211_AUTH_TIMEOUT_SAE; + else + ifmgd->auth_data->timeout = + jiffies + + IEEE80211_AUTH_TIMEOUT_SHORT; run_again(sdata, ifmgd->auth_data->timeout); } else { ifmgd->auth_data->timeout = jiffies - 1; diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 535de3161a78..05a265cd573d 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -4,6 +4,7 @@ * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -1135,7 +1136,7 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx, } /* reset session timer */ - if (reset_agg_timer && tid_tx->timeout) + if (reset_agg_timer) tid_tx->last_tx = jiffies; return queued; diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 704b3832dbad..44d8a55e9721 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -594,6 +594,7 @@ config NFT_QUOTA config NFT_REJECT default m if NETFILTER_ADVANCED=n tristate "Netfilter nf_tables reject support" + depends on !NF_TABLES_INET || (IPV6!=m || m) help This option adds the "reject" expression that you can use to explicitly deny and notify via TCP reset/ICMP informational errors diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 5ebde4b15810..f36098887ad0 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -2384,11 +2384,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) strlcpy(cfg.mcast_ifn, dm->mcast_ifn, sizeof(cfg.mcast_ifn)); cfg.syncid = dm->syncid; - rtnl_lock(); - mutex_lock(&ipvs->sync_mutex); ret = start_sync_thread(ipvs, &cfg, dm->state); - mutex_unlock(&ipvs->sync_mutex); - rtnl_unlock(); } else { mutex_lock(&ipvs->sync_mutex); ret = stop_sync_thread(ipvs, dm->state); @@ -3481,12 +3477,8 @@ static int ip_vs_genl_new_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs) if (ipvs->mixed_address_family_dests > 0) return -EINVAL; - rtnl_lock(); - mutex_lock(&ipvs->sync_mutex); ret = start_sync_thread(ipvs, &c, 
nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE])); - mutex_unlock(&ipvs->sync_mutex); - rtnl_unlock(); return ret; } diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index fbaf3bd05b2e..001501e25625 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -49,6 +49,7 @@ #include <linux/kthread.h> #include <linux/wait.h> #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <asm/unaligned.h> /* Used for ntoh_seq and hton_seq */ @@ -1360,15 +1361,9 @@ static void set_mcast_pmtudisc(struct sock *sk, int val) /* * Specifiy default interface for outgoing multicasts */ -static int set_mcast_if(struct sock *sk, char *ifname) +static int set_mcast_if(struct sock *sk, struct net_device *dev) { - struct net_device *dev; struct inet_sock *inet = inet_sk(sk); - struct net *net = sock_net(sk); - - dev = __dev_get_by_name(net, ifname); - if (!dev) - return -ENODEV; if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if) return -EINVAL; @@ -1396,19 +1391,14 @@ static int set_mcast_if(struct sock *sk, char *ifname) * in the in_addr structure passed in as a parameter. */ static int -join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname) +join_mcast_group(struct sock *sk, struct in_addr *addr, struct net_device *dev) { - struct net *net = sock_net(sk); struct ip_mreqn mreq; - struct net_device *dev; int ret; memset(&mreq, 0, sizeof(mreq)); memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr)); - dev = __dev_get_by_name(net, ifname); - if (!dev) - return -ENODEV; if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if) return -EINVAL; @@ -1423,15 +1413,10 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname) #ifdef CONFIG_IP_VS_IPV6 static int join_mcast_group6(struct sock *sk, struct in6_addr *addr, - char *ifname) + struct net_device *dev) { - struct net *net = sock_net(sk); - struct net_device *dev; int ret; - dev = __dev_get_by_name(net, ifname); - if (!dev) - return -ENODEV; if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if) return -EINVAL; @@ -1443,24 +1428,18 @@ static int join_mcast_group6(struct sock *sk, struct in6_addr *addr, } #endif -static int bind_mcastif_addr(struct socket *sock, char *ifname) +static int bind_mcastif_addr(struct socket *sock, struct net_device *dev) { - struct net *net = sock_net(sock->sk); - struct net_device *dev; __be32 addr; struct sockaddr_in sin; - dev = __dev_get_by_name(net, ifname); - if (!dev) - return -ENODEV; - addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE); if (!addr) pr_err("You probably need to specify IP address on " "multicast interface.\n"); IP_VS_DBG(7, "binding socket with (%s) %pI4\n", - ifname, &addr); + dev->name, &addr); /* Now bind the socket with the address of multicast interface */ sin.sin_family = AF_INET; @@ -1493,7 +1472,8 @@ static void get_mcast_sockaddr(union ipvs_sockaddr *sa, int *salen, /* * Set up sending multicast socket over UDP */ -static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id) +static int make_send_sock(struct netns_ipvs *ipvs, int id, + struct net_device *dev, struct socket **sock_ret) { /* multicast addr */ union ipvs_sockaddr mcast_addr; @@ -1505,9 +1485,10 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id) IPPROTO_UDP, &sock); if (result < 0) { pr_err("Error during creation of socket; terminating\n"); - return ERR_PTR(result); + goto error; } - result = set_mcast_if(sock->sk, ipvs->mcfg.mcast_ifn); + *sock_ret = sock; + result = set_mcast_if(sock->sk, 
dev); if (result < 0) { pr_err("Error setting outbound mcast interface\n"); goto error; @@ -1522,7 +1503,7 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id) set_sock_size(sock->sk, 1, result); if (AF_INET == ipvs->mcfg.mcast_af) - result = bind_mcastif_addr(sock, ipvs->mcfg.mcast_ifn); + result = bind_mcastif_addr(sock, dev); else result = 0; if (result < 0) { @@ -1538,19 +1519,18 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id) goto error; } - return sock; + return 0; error: - sock_release(sock); - return ERR_PTR(result); + return result; } /* * Set up receiving multicast socket over UDP */ -static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id, - int ifindex) +static int make_receive_sock(struct netns_ipvs *ipvs, int id, + struct net_device *dev, struct socket **sock_ret) { /* multicast addr */ union ipvs_sockaddr mcast_addr; @@ -1562,8 +1542,9 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id, IPPROTO_UDP, &sock); if (result < 0) { pr_err("Error during creation of socket; terminating\n"); - return ERR_PTR(result); + goto error; } + *sock_ret = sock; /* it is equivalent to the REUSEADDR option in user-space */ sock->sk->sk_reuse = SK_CAN_REUSE; result = sysctl_sync_sock_size(ipvs); @@ -1571,7 +1552,7 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id, set_sock_size(sock->sk, 0, result); get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id); - sock->sk->sk_bound_dev_if = ifindex; + sock->sk->sk_bound_dev_if = dev->ifindex; result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen); if (result < 0) { pr_err("Error binding to the multicast addr\n"); @@ -1582,21 +1563,20 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id, #ifdef CONFIG_IP_VS_IPV6 if (ipvs->bcfg.mcast_af == AF_INET6) result = join_mcast_group6(sock->sk, &mcast_addr.in6.sin6_addr, - ipvs->bcfg.mcast_ifn); + dev); else #endif result = join_mcast_group(sock->sk, &mcast_addr.in.sin_addr, - ipvs->bcfg.mcast_ifn); + dev); if (result < 0) { pr_err("Error joining to the multicast group\n"); goto error; } - return sock; + return 0; error: - sock_release(sock); - return ERR_PTR(result); + return result; } @@ -1778,13 +1758,12 @@ static int sync_thread_backup(void *data) int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c, int state) { - struct ip_vs_sync_thread_data *tinfo; + struct ip_vs_sync_thread_data *tinfo = NULL; struct task_struct **array = NULL, *task; - struct socket *sock; struct net_device *dev; char *name; int (*threadfn)(void *data); - int id, count, hlen; + int id = 0, count, hlen; int result = -ENOMEM; u16 mtu, min_mtu; @@ -1792,6 +1771,18 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c, IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %zd bytes\n", sizeof(struct ip_vs_sync_conn_v0)); + /* Do not hold one mutex and then to block on another */ + for (;;) { + rtnl_lock(); + if (mutex_trylock(&ipvs->sync_mutex)) + break; + rtnl_unlock(); + mutex_lock(&ipvs->sync_mutex); + if (rtnl_trylock()) + break; + mutex_unlock(&ipvs->sync_mutex); + } + if (!ipvs->sync_state) { count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX); ipvs->threads_mask = count - 1; @@ -1810,7 +1801,8 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c, dev = __dev_get_by_name(ipvs->net, c->mcast_ifn); if (!dev) { pr_err("Unknown mcast interface: %s\n", c->mcast_ifn); - return -ENODEV; + result = -ENODEV; + goto 
out_early; } hlen = (AF_INET6 == c->mcast_af) ? sizeof(struct ipv6hdr) + sizeof(struct udphdr) : @@ -1827,26 +1819,30 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c, c->sync_maxlen = mtu - hlen; if (state == IP_VS_STATE_MASTER) { + result = -EEXIST; if (ipvs->ms) - return -EEXIST; + goto out_early; ipvs->mcfg = *c; name = "ipvs-m:%d:%d"; threadfn = sync_thread_master; } else if (state == IP_VS_STATE_BACKUP) { + result = -EEXIST; if (ipvs->backup_threads) - return -EEXIST; + goto out_early; ipvs->bcfg = *c; name = "ipvs-b:%d:%d"; threadfn = sync_thread_backup; } else { - return -EINVAL; + result = -EINVAL; + goto out_early; } if (state == IP_VS_STATE_MASTER) { struct ipvs_master_sync_state *ms; + result = -ENOMEM; ipvs->ms = kcalloc(count, sizeof(ipvs->ms[0]), GFP_KERNEL); if (!ipvs->ms) goto out; @@ -1862,39 +1858,38 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c, } else { array = kcalloc(count, sizeof(struct task_struct *), GFP_KERNEL); + result = -ENOMEM; if (!array) goto out; } - tinfo = NULL; for (id = 0; id < count; id++) { - if (state == IP_VS_STATE_MASTER) - sock = make_send_sock(ipvs, id); - else - sock = make_receive_sock(ipvs, id, dev->ifindex); - if (IS_ERR(sock)) { - result = PTR_ERR(sock); - goto outtinfo; - } + result = -ENOMEM; tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL); if (!tinfo) - goto outsocket; + goto out; tinfo->ipvs = ipvs; - tinfo->sock = sock; + tinfo->sock = NULL; if (state == IP_VS_STATE_BACKUP) { tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen, GFP_KERNEL); if (!tinfo->buf) - goto outtinfo; + goto out; } else { tinfo->buf = NULL; } tinfo->id = id; + if (state == IP_VS_STATE_MASTER) + result = make_send_sock(ipvs, id, dev, &tinfo->sock); + else + result = make_receive_sock(ipvs, id, dev, &tinfo->sock); + if (result < 0) + goto out; task = kthread_run(threadfn, tinfo, name, ipvs->gen, id); if (IS_ERR(task)) { result = PTR_ERR(task); - goto outtinfo; + goto out; } tinfo = NULL; if (state == IP_VS_STATE_MASTER) @@ -1911,20 +1906,20 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c, ipvs->sync_state |= state; spin_unlock_bh(&ipvs->sync_buff_lock); + mutex_unlock(&ipvs->sync_mutex); + rtnl_unlock(); + /* increase the module use count */ ip_vs_use_count_inc(); return 0; -outsocket: - sock_release(sock); - -outtinfo: - if (tinfo) { - sock_release(tinfo->sock); - kfree(tinfo->buf); - kfree(tinfo); - } +out: + /* We do not need RTNL lock anymore, release it here so that + * sock_release below and in the kthreads can use rtnl_lock + * to leave the mcast group. 
+ */ + rtnl_unlock(); count = id; while (count-- > 0) { if (state == IP_VS_STATE_MASTER) @@ -1932,13 +1927,23 @@ outtinfo: else kthread_stop(array[count]); } - kfree(array); - -out: if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { kfree(ipvs->ms); ipvs->ms = NULL; } + mutex_unlock(&ipvs->sync_mutex); + if (tinfo) { + if (tinfo->sock) + sock_release(tinfo->sock); + kfree(tinfo->buf); + kfree(tinfo); + } + kfree(array); + return result; + +out_early: + mutex_unlock(&ipvs->sync_mutex); + rtnl_unlock(); return result; } diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 8ef21d9f9a00..4b2b3d53acfc 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -252,7 +252,7 @@ static inline int expect_clash(const struct nf_conntrack_expect *a, static inline int expect_matches(const struct nf_conntrack_expect *a, const struct nf_conntrack_expect *b) { - return a->master == b->master && a->class == b->class && + return a->master == b->master && nf_ct_tuple_equal(&a->tuple, &b->tuple) && nf_ct_tuple_mask_equal(&a->mask, &b->mask) && net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) && @@ -421,6 +421,9 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) h = nf_ct_expect_dst_hash(net, &expect->tuple); hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) { if (expect_matches(i, expect)) { + if (i->class != expect->class) + return -EALREADY; + if (nf_ct_remove_expect(i)) break; } else if (expect_clash(i, expect)) { diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c index 9fe0ddc333fb..277bbfe26478 100644 --- a/net/netfilter/nf_conntrack_extend.c +++ b/net/netfilter/nf_conntrack_extend.c @@ -9,6 +9,7 @@ * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> +#include <linux/kmemleak.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/rcupdate.h> @@ -71,6 +72,7 @@ void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp) rcu_read_unlock(); alloc = max(newlen, NF_CT_EXT_PREALLOC); + kmemleak_not_leak(old); new = __krealloc(old, alloc, gfp); if (!new) return NULL; diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 4dbb5bad4363..908e51e2dc2b 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c @@ -938,11 +938,19 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff, datalen, rtp_exp, rtcp_exp, mediaoff, medialen, daddr); else { - if (nf_ct_expect_related(rtp_exp) == 0) { - if (nf_ct_expect_related(rtcp_exp) != 0) - nf_ct_unexpect_related(rtp_exp); - else + /* -EALREADY handling works around end-points that send + * SDP messages with identical port but different media type, + * we pretend expectation was set up. 
+ */ + int errp = nf_ct_expect_related(rtp_exp); + + if (errp == 0 || errp == -EALREADY) { + int errcp = nf_ct_expect_related(rtcp_exp); + + if (errcp == 0 || errcp == -EALREADY) ret = NF_ACCEPT; + else if (errp == 0) + nf_ct_unexpect_related(rtp_exp); } } nf_ct_expect_put(rtcp_exp); diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 9134cc429ad4..04d4e3772584 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2361,41 +2361,46 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk, } if (nlh->nlmsg_flags & NLM_F_REPLACE) { - if (nft_is_active_next(net, old_rule)) { - trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE, - old_rule); - if (trans == NULL) { - err = -ENOMEM; - goto err2; - } - nft_deactivate_next(net, old_rule); - chain->use--; - list_add_tail_rcu(&rule->list, &old_rule->list); - } else { + if (!nft_is_active_next(net, old_rule)) { err = -ENOENT; goto err2; } - } else if (nlh->nlmsg_flags & NLM_F_APPEND) - if (old_rule) - list_add_rcu(&rule->list, &old_rule->list); - else - list_add_tail_rcu(&rule->list, &chain->rules); - else { - if (old_rule) - list_add_tail_rcu(&rule->list, &old_rule->list); - else - list_add_rcu(&rule->list, &chain->rules); - } + trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE, + old_rule); + if (trans == NULL) { + err = -ENOMEM; + goto err2; + } + nft_deactivate_next(net, old_rule); + chain->use--; - if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) { - err = -ENOMEM; - goto err3; + if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) { + err = -ENOMEM; + goto err2; + } + + list_add_tail_rcu(&rule->list, &old_rule->list); + } else { + if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) { + err = -ENOMEM; + goto err2; + } + + if (nlh->nlmsg_flags & NLM_F_APPEND) { + if (old_rule) + list_add_rcu(&rule->list, &old_rule->list); + else + list_add_tail_rcu(&rule->list, &chain->rules); + } else { + if (old_rule) + list_add_tail_rcu(&rule->list, &old_rule->list); + else + list_add_rcu(&rule->list, &chain->rules); + } } chain->use++; return 0; -err3: - list_del_rcu(&rule->list); err2: nf_tables_rule_destroy(&ctx, rule); err1: @@ -3207,18 +3212,20 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, err = ops->init(set, &desc, nla); if (err < 0) - goto err2; + goto err3; err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set); if (err < 0) - goto err3; + goto err4; list_add_tail_rcu(&set->list, &table->sets); table->use++; return 0; -err3: +err4: ops->destroy(set); +err3: + kfree(set->name); err2: kvfree(set); err1: @@ -5738,7 +5745,7 @@ static void nft_chain_commit_update(struct nft_trans *trans) struct nft_base_chain *basechain; if (nft_trans_chain_name(trans)) - strcpy(trans->ctx.chain->name, nft_trans_chain_name(trans)); + swap(trans->ctx.chain->name, nft_trans_chain_name(trans)); if (!nft_is_base_chain(trans->ctx.chain)) return; diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c index 773da82190dc..94df000abb92 100644 --- a/net/netfilter/xt_connmark.c +++ b/net/netfilter/xt_connmark.c @@ -36,11 +36,10 @@ MODULE_ALIAS("ipt_connmark"); MODULE_ALIAS("ip6t_connmark"); static unsigned int -connmark_tg_shift(struct sk_buff *skb, - const struct xt_connmark_tginfo1 *info, - u8 shift_bits, u8 shift_dir) +connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info) { enum ip_conntrack_info ctinfo; + u_int32_t new_targetmark; struct nf_conn *ct; u_int32_t newmark; @@ -51,34 +50,39 @@ connmark_tg_shift(struct sk_buff *skb, 
switch (info->mode) { case XT_CONNMARK_SET: newmark = (ct->mark & ~info->ctmask) ^ info->ctmark; - if (shift_dir == D_SHIFT_RIGHT) - newmark >>= shift_bits; + if (info->shift_dir == D_SHIFT_RIGHT) + newmark >>= info->shift_bits; else - newmark <<= shift_bits; + newmark <<= info->shift_bits; + if (ct->mark != newmark) { ct->mark = newmark; nf_conntrack_event_cache(IPCT_MARK, ct); } break; case XT_CONNMARK_SAVE: - newmark = (ct->mark & ~info->ctmask) ^ - (skb->mark & info->nfmask); - if (shift_dir == D_SHIFT_RIGHT) - newmark >>= shift_bits; + new_targetmark = (skb->mark & info->nfmask); + if (info->shift_dir == D_SHIFT_RIGHT) + new_targetmark >>= info->shift_bits; else - newmark <<= shift_bits; + new_targetmark <<= info->shift_bits; + + newmark = (ct->mark & ~info->ctmask) ^ + new_targetmark; if (ct->mark != newmark) { ct->mark = newmark; nf_conntrack_event_cache(IPCT_MARK, ct); } break; case XT_CONNMARK_RESTORE: - newmark = (skb->mark & ~info->nfmask) ^ - (ct->mark & info->ctmask); - if (shift_dir == D_SHIFT_RIGHT) - newmark >>= shift_bits; + new_targetmark = (ct->mark & info->ctmask); + if (info->shift_dir == D_SHIFT_RIGHT) + new_targetmark >>= info->shift_bits; else - newmark <<= shift_bits; + new_targetmark <<= info->shift_bits; + + newmark = (skb->mark & ~info->nfmask) ^ + new_targetmark; skb->mark = newmark; break; } @@ -89,8 +93,14 @@ static unsigned int connmark_tg(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_connmark_tginfo1 *info = par->targinfo; - - return connmark_tg_shift(skb, info, 0, 0); + const struct xt_connmark_tginfo2 info2 = { + .ctmark = info->ctmark, + .ctmask = info->ctmask, + .nfmask = info->nfmask, + .mode = info->mode, + }; + + return connmark_tg_shift(skb, &info2); } static unsigned int @@ -98,8 +108,7 @@ connmark_tg_v2(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_connmark_tginfo2 *info = par->targinfo; - return connmark_tg_shift(skb, (const struct xt_connmark_tginfo1 *)info, - info->shift_bits, info->shift_dir); + return connmark_tg_shift(skb, info); } static int connmark_tg_check(const struct xt_tgchk_param *par) diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 55342c4d5cec..2e2dd88fc79f 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2606,13 +2606,13 @@ static int netlink_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) { seq_puts(seq, - "sk Eth Pid Groups " - "Rmem Wmem Dump Locks Drops Inode\n"); + "sk Eth Pid Groups " + "Rmem Wmem Dump Locks Drops Inode\n"); } else { struct sock *s = v; struct netlink_sock *nlk = nlk_sk(s); - seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n", + seq_printf(seq, "%pK %-3d %-10u %08x %-8d %-8d %-5d %-8d %-8d %-8lu\n", s, s->sk_protocol, nlk->portid, diff --git a/net/nsh/nsh.c b/net/nsh/nsh.c index d7da99a0b0b8..9696ef96b719 100644 --- a/net/nsh/nsh.c +++ b/net/nsh/nsh.c @@ -57,6 +57,8 @@ int nsh_pop(struct sk_buff *skb) return -ENOMEM; nh = (struct nshhdr *)(skb->data); length = nsh_hdr_len(nh); + if (length < NSH_BASE_HDR_LEN) + return -EINVAL; inner_proto = tun_p_to_eth_p(nh->np); if (!pskb_may_pull(skb, length)) return -ENOMEM; @@ -90,6 +92,8 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb, if (unlikely(!pskb_may_pull(skb, NSH_BASE_HDR_LEN))) goto out; nsh_len = nsh_hdr_len(nsh_hdr(skb)); + if (nsh_len < NSH_BASE_HDR_LEN) + goto out; if (unlikely(!pskb_may_pull(skb, nsh_len))) goto out; diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 
7322aa1e382e..492ab0c36f7c 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -1712,13 +1712,10 @@ static void nlattr_set(struct nlattr *attr, u8 val, /* The nlattr stream should already have been validated */ nla_for_each_nested(nla, attr, rem) { - if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) { - if (tbl[nla_type(nla)].next) - tbl = tbl[nla_type(nla)].next; - nlattr_set(nla, val, tbl); - } else { + if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) + nlattr_set(nla, val, tbl[nla_type(nla)].next ? : tbl); + else memset(nla_data(nla), val, nla_len(nla)); - } if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE) *(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 616cb9c18f88..01f3515cada0 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -329,11 +329,11 @@ static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) skb_set_queue_mapping(skb, queue_index); } -/* register_prot_hook must be invoked with the po->bind_lock held, +/* __register_prot_hook must be invoked through register_prot_hook * or from a context in which asynchronous accesses to the packet * socket is not possible (packet_create()). */ -static void register_prot_hook(struct sock *sk) +static void __register_prot_hook(struct sock *sk) { struct packet_sock *po = pkt_sk(sk); @@ -348,8 +348,13 @@ static void register_prot_hook(struct sock *sk) } } -/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock - * held. If the sync parameter is true, we will temporarily drop +static void register_prot_hook(struct sock *sk) +{ + lockdep_assert_held_once(&pkt_sk(sk)->bind_lock); + __register_prot_hook(sk); +} + +/* If the sync parameter is true, we will temporarily drop * the po->bind_lock and do a synchronize_net to make sure no * asynchronous packet processing paths still refer to the elements * of po->prot_hook. 
If the sync parameter is false, it is the @@ -359,6 +364,8 @@ static void __unregister_prot_hook(struct sock *sk, bool sync) { struct packet_sock *po = pkt_sk(sk); + lockdep_assert_held_once(&po->bind_lock); + po->running = 0; if (po->fanout) @@ -3008,6 +3015,7 @@ static int packet_release(struct socket *sock) packet_flush_mclist(sk); + lock_sock(sk); if (po->rx_ring.pg_vec) { memset(&req_u, 0, sizeof(req_u)); packet_set_ring(sk, &req_u, 1, 0); @@ -3017,6 +3025,7 @@ static int packet_release(struct socket *sock) memset(&req_u, 0, sizeof(req_u)); packet_set_ring(sk, &req_u, 1, 1); } + release_sock(sk); f = fanout_release(sk); @@ -3250,7 +3259,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol, if (proto) { po->prot_hook.type = proto; - register_prot_hook(sk); + __register_prot_hook(sk); } mutex_lock(&net->packet.sklist_lock); @@ -3643,6 +3652,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv union tpacket_req_u req_u; int len; + lock_sock(sk); switch (po->tp_version) { case TPACKET_V1: case TPACKET_V2: @@ -3653,12 +3663,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv len = sizeof(req_u.req3); break; } - if (optlen < len) - return -EINVAL; - if (copy_from_user(&req_u.req, optval, len)) - return -EFAULT; - return packet_set_ring(sk, &req_u, 0, - optname == PACKET_TX_RING); + if (optlen < len) { + ret = -EINVAL; + } else { + if (copy_from_user(&req_u.req, optval, len)) + ret = -EFAULT; + else + ret = packet_set_ring(sk, &req_u, 0, + optname == PACKET_TX_RING); + } + release_sock(sk); + return ret; } case PACKET_COPY_THRESH: { @@ -3724,12 +3739,18 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen != sizeof(val)) return -EINVAL; - if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) - return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; - po->tp_loss = !!val; - return 0; + + lock_sock(sk); + if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { + ret = -EBUSY; + } else { + po->tp_loss = !!val; + ret = 0; + } + release_sock(sk); + return ret; } case PACKET_AUXDATA: { @@ -3740,7 +3761,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; + lock_sock(sk); po->auxdata = !!val; + release_sock(sk); return 0; } case PACKET_ORIGDEV: @@ -3752,7 +3775,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; + lock_sock(sk); po->origdev = !!val; + release_sock(sk); return 0; } case PACKET_VNET_HDR: @@ -3761,15 +3786,20 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (sock->type != SOCK_RAW) return -EINVAL; - if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) - return -EBUSY; if (optlen < sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; - po->has_vnet_hdr = !!val; - return 0; + lock_sock(sk); + if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { + ret = -EBUSY; + } else { + po->has_vnet_hdr = !!val; + ret = 0; + } + release_sock(sk); + return ret; } case PACKET_TIMESTAMP: { @@ -3807,11 +3837,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen != sizeof(val)) return -EINVAL; - if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) - return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; - po->tp_tx_has_off = !!val; + + lock_sock(sk); + 
if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { + ret = -EBUSY; + } else { + po->tp_tx_has_off = !!val; + ret = 0; + } + release_sock(sk); return 0; } case PACKET_QDISC_BYPASS: @@ -4208,8 +4244,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, /* Added to avoid minimal code churn */ struct tpacket_req *req = &req_u->req; - lock_sock(sk); - rb = tx_ring ? &po->tx_ring : &po->rx_ring; rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; @@ -4347,7 +4381,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, if (pg_vec) free_pg_vec(pg_vec, order, req->tp_block_nr); out: - release_sock(sk); return err; } diff --git a/net/packet/internal.h b/net/packet/internal.h index a1d2b2319ae9..3bb7c5fb3bff 100644 --- a/net/packet/internal.h +++ b/net/packet/internal.h @@ -112,10 +112,12 @@ struct packet_sock { int copy_thresh; spinlock_t bind_lock; struct mutex pg_vec_lock; - unsigned int running:1, /* prot_hook is attached*/ - auxdata:1, + unsigned int running; /* bind_lock must be held */ + unsigned int auxdata:1, /* writer must hold sock lock */ origdev:1, - has_vnet_hdr:1; + has_vnet_hdr:1, + tp_loss:1, + tp_tx_has_off:1; int pressure; int ifindex; /* bound device */ __be16 num; @@ -125,8 +127,6 @@ struct packet_sock { enum tpacket_versions tp_version; unsigned int tp_hdrlen; unsigned int tp_reserve; - unsigned int tp_loss:1; - unsigned int tp_tx_has_off:1; unsigned int tp_tstamp; struct net_device __rcu *cached_dev; int (*xmit)(struct sk_buff *skb); diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index b33e5aeb4c06..2aa07b547b16 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -1135,3 +1135,4 @@ module_exit(qrtr_proto_fini); MODULE_DESCRIPTION("Qualcomm IPC-router driver"); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS_NETPROTO(PF_QIPCRTR); diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index eea1d8611b20..13b38ad0fa4a 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c @@ -547,7 +547,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn) rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd, ic->i_send_cq, ic->i_recv_cq); - return ret; + goto out; sends_out: vfree(ic->i_sends); @@ -572,6 +572,7 @@ send_cq_out: ic->i_send_cq = NULL; rds_ibdev_out: rds_ib_remove_conn(rds_ibdev, conn); +out: rds_ib_dev_put(rds_ibdev); return ret; diff --git a/net/rds/recv.c b/net/rds/recv.c index de50e2126e40..dc67458b52f0 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c @@ -558,6 +558,7 @@ static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg, struct rds_cmsg_rx_trace t; int i, j; + memset(&t, 0, sizeof(t)); inc->i_rx_lat_trace[RDS_MSG_RX_CMSG] = local_clock(); t.rx_traces = rs->rs_rx_traces; for (i = 0; i < rs->rs_rx_traces; i++) { diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c index 41bd496531d4..00192a996be0 100644 --- a/net/rfkill/rfkill-gpio.c +++ b/net/rfkill/rfkill-gpio.c @@ -137,13 +137,18 @@ static int rfkill_gpio_probe(struct platform_device *pdev) ret = rfkill_register(rfkill->rfkill_dev); if (ret < 0) - return ret; + goto err_destroy; platform_set_drvdata(pdev, rfkill); dev_info(&pdev->dev, "%s device registered.\n", rfkill->name); return 0; + +err_destroy: + rfkill_destroy(rfkill->rfkill_dev); + + return ret; } static int rfkill_gpio_remove(struct platform_device *pdev) diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 9a2c8e7c000e..2b463047dd7b 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -313,7 +313,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock, 
memset(&cp, 0, sizeof(cp)); cp.local = rx->local; cp.key = key; - cp.security_level = 0; + cp.security_level = rx->min_sec_level; cp.exclusive = false; cp.upgrade = upgrade; cp.service_id = srx->srx_service; diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 90d7079e0aa9..19975d2ca9a2 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -476,6 +476,7 @@ enum rxrpc_call_flag { RXRPC_CALL_SEND_PING, /* A ping will need to be sent */ RXRPC_CALL_PINGING, /* Ping in process */ RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */ + RXRPC_CALL_BEGAN_RX_TIMER, /* We began the expect_rx_by timer */ }; /* diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index c717152070df..1350f1be8037 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -40,7 +40,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, } __attribute__((packed)) pkt; struct rxrpc_ackinfo ack_info; size_t len; - int ioc; + int ret, ioc; u32 serial, mtu, call_id, padding; _enter("%d", conn->debug_id); @@ -135,10 +135,13 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, break; } - kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len); + ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len); conn->params.peer->last_tx_at = ktime_get_real(); + if (ret < 0) + trace_rxrpc_tx_fail(conn->debug_id, serial, ret, + rxrpc_tx_fail_call_final_resend); + _leave(""); - return; } /* @@ -236,6 +239,8 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len); if (ret < 0) { + trace_rxrpc_tx_fail(conn->debug_id, serial, ret, + rxrpc_tx_fail_conn_abort); _debug("sendmsg failed: %d", ret); return -EAGAIN; } diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 0410d2277ca2..b5fd6381313d 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -971,7 +971,7 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call, if (timo) { unsigned long now = jiffies, expect_rx_by; - expect_rx_by = jiffies + timo; + expect_rx_by = now + timo; WRITE_ONCE(call->expect_rx_by, expect_rx_by); rxrpc_reduce_call_timer(call, expect_rx_by, now, rxrpc_timer_set_for_normal); diff --git a/net/rxrpc/local_event.c b/net/rxrpc/local_event.c index 93b5d910b4a1..8325f1b86840 100644 --- a/net/rxrpc/local_event.c +++ b/net/rxrpc/local_event.c @@ -71,7 +71,8 @@ static void rxrpc_send_version_request(struct rxrpc_local *local, ret = kernel_sendmsg(local->socket, &msg, iov, 2, len); if (ret < 0) - _debug("sendmsg failed: %d", ret); + trace_rxrpc_tx_fail(local->debug_id, 0, ret, + rxrpc_tx_fail_version_reply); _leave(""); } diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c index 8b54e9531d52..b493e6b62740 100644 --- a/net/rxrpc/local_object.c +++ b/net/rxrpc/local_object.c @@ -134,22 +134,49 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) } } - /* we want to receive ICMP errors */ - opt = 1; - ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR, - (char *) &opt, sizeof(opt)); - if (ret < 0) { - _debug("setsockopt failed"); - goto error; - } + switch (local->srx.transport.family) { + case AF_INET: + /* we want to receive ICMP errors */ + opt = 1; + ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR, + (char *) &opt, sizeof(opt)); + if (ret < 0) { + _debug("setsockopt failed"); + goto error; + } - /* we want to set the don't fragment bit */ - opt = IP_PMTUDISC_DO; - ret = kernel_setsockopt(local->socket, 
SOL_IP, IP_MTU_DISCOVER, - (char *) &opt, sizeof(opt)); - if (ret < 0) { - _debug("setsockopt failed"); - goto error; + /* we want to set the don't fragment bit */ + opt = IP_PMTUDISC_DO; + ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER, + (char *) &opt, sizeof(opt)); + if (ret < 0) { + _debug("setsockopt failed"); + goto error; + } + break; + + case AF_INET6: + /* we want to receive ICMP errors */ + opt = 1; + ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR, + (char *) &opt, sizeof(opt)); + if (ret < 0) { + _debug("setsockopt failed"); + goto error; + } + + /* we want to set the don't fragment bit */ + opt = IPV6_PMTUDISC_DO; + ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER, + (char *) &opt, sizeof(opt)); + if (ret < 0) { + _debug("setsockopt failed"); + goto error; + } + break; + + default: + BUG(); } /* set the socket up */ diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index 7f1fc04775b3..f03de1c59ba3 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c @@ -210,6 +210,9 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping, if (ping) call->ping_time = now; conn->params.peer->last_tx_at = ktime_get_real(); + if (ret < 0) + trace_rxrpc_tx_fail(call->debug_id, serial, ret, + rxrpc_tx_fail_call_ack); if (call->state < RXRPC_CALL_COMPLETE) { if (ret < 0) { @@ -294,6 +297,10 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call) ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 1, sizeof(pkt)); conn->params.peer->last_tx_at = ktime_get_real(); + if (ret < 0) + trace_rxrpc_tx_fail(call->debug_id, serial, ret, + rxrpc_tx_fail_call_abort); + rxrpc_put_connection(conn); return ret; @@ -387,6 +394,9 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, conn->params.peer->last_tx_at = ktime_get_real(); up_read(&conn->params.local->defrag_sem); + if (ret < 0) + trace_rxrpc_tx_fail(call->debug_id, serial, ret, + rxrpc_tx_fail_call_data_nofrag); if (ret == -EMSGSIZE) goto send_fragmentable; @@ -414,6 +424,17 @@ done: rxrpc_timer_set_for_lost_ack); } } + + if (sp->hdr.seq == 1 && + !test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER, + &call->flags)) { + unsigned long nowj = jiffies, expect_rx_by; + + expect_rx_by = nowj + call->next_rx_timo; + WRITE_ONCE(call->expect_rx_by, expect_rx_by); + rxrpc_reduce_call_timer(call, expect_rx_by, nowj, + rxrpc_timer_set_for_normal); + } } rxrpc_set_keepalive(call); @@ -465,6 +486,10 @@ send_fragmentable: #endif } + if (ret < 0) + trace_rxrpc_tx_fail(call->debug_id, serial, ret, + rxrpc_tx_fail_call_data_frag); + up_write(&conn->params.local->defrag_sem); goto done; } @@ -482,6 +507,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local) struct kvec iov[2]; size_t size; __be32 code; + int ret; _enter("%d", local->debug_id); @@ -516,7 +542,10 @@ void rxrpc_reject_packets(struct rxrpc_local *local) whdr.flags ^= RXRPC_CLIENT_INITIATED; whdr.flags &= RXRPC_CLIENT_INITIATED; - kernel_sendmsg(local->socket, &msg, iov, 2, size); + ret = kernel_sendmsg(local->socket, &msg, iov, 2, size); + if (ret < 0) + trace_rxrpc_tx_fail(local->debug_id, 0, ret, + rxrpc_tx_fail_reject); } rxrpc_free_skb(skb, rxrpc_skb_rx_freed); @@ -567,7 +596,8 @@ void rxrpc_send_keepalive(struct rxrpc_peer *peer) ret = kernel_sendmsg(peer->local->socket, &msg, iov, 2, len); if (ret < 0) - _debug("sendmsg failed: %d", ret); + trace_rxrpc_tx_fail(peer->debug_id, 0, ret, + rxrpc_tx_fail_version_keepalive); peer->last_tx_at = ktime_get_real(); _leave(""); diff --git a/net/rxrpc/peer_event.c 
b/net/rxrpc/peer_event.c index 78c2f95d1f22..0ed8b651cec2 100644 --- a/net/rxrpc/peer_event.c +++ b/net/rxrpc/peer_event.c @@ -28,39 +28,39 @@ static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *); * Find the peer associated with an ICMP packet. */ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local, - const struct sk_buff *skb) + const struct sk_buff *skb, + struct sockaddr_rxrpc *srx) { struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); - struct sockaddr_rxrpc srx; _enter(""); - memset(&srx, 0, sizeof(srx)); - srx.transport_type = local->srx.transport_type; - srx.transport_len = local->srx.transport_len; - srx.transport.family = local->srx.transport.family; + memset(srx, 0, sizeof(*srx)); + srx->transport_type = local->srx.transport_type; + srx->transport_len = local->srx.transport_len; + srx->transport.family = local->srx.transport.family; /* Can we see an ICMP4 packet on an ICMP6 listening socket? and vice * versa? */ - switch (srx.transport.family) { + switch (srx->transport.family) { case AF_INET: - srx.transport.sin.sin_port = serr->port; + srx->transport.sin.sin_port = serr->port; switch (serr->ee.ee_origin) { case SO_EE_ORIGIN_ICMP: _net("Rx ICMP"); - memcpy(&srx.transport.sin.sin_addr, + memcpy(&srx->transport.sin.sin_addr, skb_network_header(skb) + serr->addr_offset, sizeof(struct in_addr)); break; case SO_EE_ORIGIN_ICMP6: _net("Rx ICMP6 on v4 sock"); - memcpy(&srx.transport.sin.sin_addr, + memcpy(&srx->transport.sin.sin_addr, skb_network_header(skb) + serr->addr_offset + 12, sizeof(struct in_addr)); break; default: - memcpy(&srx.transport.sin.sin_addr, &ip_hdr(skb)->saddr, + memcpy(&srx->transport.sin.sin_addr, &ip_hdr(skb)->saddr, sizeof(struct in_addr)); break; } @@ -68,25 +68,25 @@ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local, #ifdef CONFIG_AF_RXRPC_IPV6 case AF_INET6: - srx.transport.sin6.sin6_port = serr->port; + srx->transport.sin6.sin6_port = serr->port; switch (serr->ee.ee_origin) { case SO_EE_ORIGIN_ICMP6: _net("Rx ICMP6"); - memcpy(&srx.transport.sin6.sin6_addr, + memcpy(&srx->transport.sin6.sin6_addr, skb_network_header(skb) + serr->addr_offset, sizeof(struct in6_addr)); break; case SO_EE_ORIGIN_ICMP: _net("Rx ICMP on v6 sock"); - srx.transport.sin6.sin6_addr.s6_addr32[0] = 0; - srx.transport.sin6.sin6_addr.s6_addr32[1] = 0; - srx.transport.sin6.sin6_addr.s6_addr32[2] = htonl(0xffff); - memcpy(srx.transport.sin6.sin6_addr.s6_addr + 12, + srx->transport.sin6.sin6_addr.s6_addr32[0] = 0; + srx->transport.sin6.sin6_addr.s6_addr32[1] = 0; + srx->transport.sin6.sin6_addr.s6_addr32[2] = htonl(0xffff); + memcpy(srx->transport.sin6.sin6_addr.s6_addr + 12, skb_network_header(skb) + serr->addr_offset, sizeof(struct in_addr)); break; default: - memcpy(&srx.transport.sin6.sin6_addr, + memcpy(&srx->transport.sin6.sin6_addr, &ipv6_hdr(skb)->saddr, sizeof(struct in6_addr)); break; @@ -98,7 +98,7 @@ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local, BUG(); } - return rxrpc_lookup_peer_rcu(local, &srx); + return rxrpc_lookup_peer_rcu(local, srx); } /* @@ -146,6 +146,7 @@ static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, struct sock_exterr_skb *se void rxrpc_error_report(struct sock *sk) { struct sock_exterr_skb *serr; + struct sockaddr_rxrpc srx; struct rxrpc_local *local = sk->sk_user_data; struct rxrpc_peer *peer; struct sk_buff *skb; @@ -166,7 +167,7 @@ void rxrpc_error_report(struct sock *sk) } rcu_read_lock(); - peer = rxrpc_lookup_peer_icmp_rcu(local, skb); + peer = 
rxrpc_lookup_peer_icmp_rcu(local, skb, &srx); if (peer && !rxrpc_get_peer_maybe(peer)) peer = NULL; if (!peer) { @@ -176,6 +177,8 @@ void rxrpc_error_report(struct sock *sk) return; } + trace_rxrpc_rx_icmp(peer, &serr->ee, &srx); + if ((serr->ee.ee_origin == SO_EE_ORIGIN_ICMP && serr->ee.ee_type == ICMP_DEST_UNREACH && serr->ee.ee_code == ICMP_FRAG_NEEDED)) { @@ -209,9 +212,6 @@ static void rxrpc_store_error(struct rxrpc_peer *peer, ee = &serr->ee; - _net("Rx Error o=%d t=%d c=%d e=%d", - ee->ee_origin, ee->ee_type, ee->ee_code, ee->ee_errno); - err = ee->ee_errno; switch (ee->ee_origin) { diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index 588fea0dd362..6c0ae27fff84 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c @@ -664,7 +664,8 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn) ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len); if (ret < 0) { - _debug("sendmsg failed: %d", ret); + trace_rxrpc_tx_fail(conn->debug_id, serial, ret, + rxrpc_tx_fail_conn_challenge); return -EAGAIN; } @@ -719,7 +720,8 @@ static int rxkad_send_response(struct rxrpc_connection *conn, ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 3, len); if (ret < 0) { - _debug("sendmsg failed: %d", ret); + trace_rxrpc_tx_fail(conn->debug_id, serial, ret, + rxrpc_tx_fail_conn_response); return -EAGAIN; } diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index 206e802ccbdc..be01f9c5d963 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -223,6 +223,15 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, ret = rxrpc_send_data_packet(call, skb, false); if (ret < 0) { + switch (ret) { + case -ENETUNREACH: + case -EHOSTUNREACH: + case -ECONNREFUSED: + rxrpc_set_call_completion(call, + RXRPC_CALL_LOCAL_ERROR, + 0, ret); + goto out; + } _debug("need instant resend %d", ret); rxrpc_instant_resend(call, ix); } else { @@ -241,6 +250,7 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, rxrpc_timer_set_for_send); } +out: rxrpc_free_skb(skb, rxrpc_skb_tx_freed); _leave(""); } diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index a5994cf0512b..8527cfdc446d 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -652,7 +652,7 @@ static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife, } } - return 0; + return -ENOENT; } static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a, @@ -682,7 +682,12 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a, u16 mtype; u16 dlen; - curr_data = ife_tlv_meta_decode(tlv_data, &mtype, &dlen, NULL); + curr_data = ife_tlv_meta_decode(tlv_data, ifehdr_end, &mtype, + &dlen, NULL); + if (!curr_data) { + qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats)); + return TC_ACT_SHOT; + } if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) { /* abuse overlimits to count when we receive metadata diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index ddf69fc01bdf..6138d1d71900 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -121,7 +121,8 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla, return 0; if (!flags) { - tcf_idr_release(*a, bind); + if (exists) + tcf_idr_release(*a, bind); return -EINVAL; } diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index bbcbdce732cc..ad050d7d4b46 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c @@ -131,8 +131,11 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla, if (exists && bind) 
return 0; - if (!lflags) + if (!lflags) { + if (exists) + tcf_idr_release(*a, bind); return -EINVAL; + } if (!exists) { ret = tcf_idr_create(tn, parm->index, est, a, diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index b66754f52a9f..963e4bf0aab8 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -152,8 +152,8 @@ static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol, NL_SET_ERR_MSG(extack, "TC classifier not found"); err = -ENOENT; } - goto errout; #endif + goto errout; } tp->classify = tp->ops->classify; tp->protocol = protocol; diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index a366e4c9413a..4808713c73b9 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -128,6 +128,28 @@ static bool fq_flow_is_detached(const struct fq_flow *f) return f->next == &detached; } +static bool fq_flow_is_throttled(const struct fq_flow *f) +{ + return f->next == &throttled; +} + +static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow) +{ + if (head->first) + head->last->next = flow; + else + head->first = flow; + head->last = flow; + flow->next = NULL; +} + +static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f) +{ + rb_erase(&f->rate_node, &q->delayed); + q->throttled_flows--; + fq_flow_add_tail(&q->old_flows, f); +} + static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f) { struct rb_node **p = &q->delayed.rb_node, *parent = NULL; @@ -155,15 +177,6 @@ static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f) static struct kmem_cache *fq_flow_cachep __read_mostly; -static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow) -{ - if (head->first) - head->last->next = flow; - else - head->first = flow; - head->last = flow; - flow->next = NULL; -} /* limit number of collected flows per round */ #define FQ_GC_MAX 8 @@ -267,6 +280,8 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q) f->socket_hash != sk->sk_hash)) { f->credit = q->initial_quantum; f->socket_hash = sk->sk_hash; + if (fq_flow_is_throttled(f)) + fq_flow_unset_throttled(q, f); f->time_next_packet = 0ULL; } return f; @@ -438,9 +453,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now) q->time_next_delayed_flow = f->time_next_packet; break; } - rb_erase(p, &q->delayed); - q->throttled_flows--; - fq_flow_add_tail(&q->old_flows, f); + fq_flow_unset_throttled(q, f); } } diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 837806dd5799..a47179da24e6 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -1024,8 +1024,9 @@ static void sctp_assoc_bh_rcv(struct work_struct *work) struct sctp_endpoint *ep; struct sctp_chunk *chunk; struct sctp_inq *inqueue; - int state; + int first_time = 1; /* is this the first time through the loop */ int error = 0; + int state; /* The association should be held so we should be safe. */ ep = asoc->ep; @@ -1036,6 +1037,30 @@ static void sctp_assoc_bh_rcv(struct work_struct *work) state = asoc->state; subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type); + /* If the first chunk in the packet is AUTH, do special + * processing specified in Section 6.3 of SCTP-AUTH spec + */ + if (first_time && subtype.chunk == SCTP_CID_AUTH) { + struct sctp_chunkhdr *next_hdr; + + next_hdr = sctp_inq_peek(inqueue); + if (!next_hdr) + goto normal; + + /* If the next chunk is COOKIE-ECHO, skip the AUTH + * chunk while saving a pointer to it so we can do + * Authentication later (during cookie-echo + * processing). 
+ */ + if (next_hdr->type == SCTP_CID_COOKIE_ECHO) { + chunk->auth_chunk = skb_clone(chunk->skb, + GFP_ATOMIC); + chunk->auth = 1; + continue; + } + } + +normal: /* SCTP-AUTH, Section 6.3: * The receiver has a list of chunk types which it expects * to be received only after an AUTH-chunk. This list has @@ -1074,6 +1099,9 @@ static void sctp_assoc_bh_rcv(struct work_struct *work) /* If there is an error on chunk, discard this packet. */ if (error && chunk) chunk->pdiscard = 1; + + if (first_time) + first_time = 0; } sctp_association_put(asoc); } diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c index 23ebc5318edc..eb93ffe2408b 100644 --- a/net/sctp/inqueue.c +++ b/net/sctp/inqueue.c @@ -217,7 +217,7 @@ new_skb: skb_pull(chunk->skb, sizeof(*ch)); chunk->subh.v = NULL; /* Subheader is no longer valid. */ - if (chunk->chunk_end + sizeof(*ch) < skb_tail_pointer(chunk->skb)) { + if (chunk->chunk_end + sizeof(*ch) <= skb_tail_pointer(chunk->skb)) { /* This is not a singleton */ chunk->singleton = 0; } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) { diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 31083b5035ec..42247110d842 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -556,46 +556,49 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr, addr->v6.sin6_scope_id = 0; } -/* Compare addresses exactly. - * v4-mapped-v6 is also in consideration. - */ -static int sctp_v6_cmp_addr(const union sctp_addr *addr1, - const union sctp_addr *addr2) +static int __sctp_v6_cmp_addr(const union sctp_addr *addr1, + const union sctp_addr *addr2) { if (addr1->sa.sa_family != addr2->sa.sa_family) { if (addr1->sa.sa_family == AF_INET && addr2->sa.sa_family == AF_INET6 && - ipv6_addr_v4mapped(&addr2->v6.sin6_addr)) { - if (addr2->v6.sin6_port == addr1->v4.sin_port && - addr2->v6.sin6_addr.s6_addr32[3] == - addr1->v4.sin_addr.s_addr) - return 1; - } + ipv6_addr_v4mapped(&addr2->v6.sin6_addr) && + addr2->v6.sin6_addr.s6_addr32[3] == + addr1->v4.sin_addr.s_addr) + return 1; + if (addr2->sa.sa_family == AF_INET && addr1->sa.sa_family == AF_INET6 && - ipv6_addr_v4mapped(&addr1->v6.sin6_addr)) { - if (addr1->v6.sin6_port == addr2->v4.sin_port && - addr1->v6.sin6_addr.s6_addr32[3] == - addr2->v4.sin_addr.s_addr) - return 1; - } + ipv6_addr_v4mapped(&addr1->v6.sin6_addr) && + addr1->v6.sin6_addr.s6_addr32[3] == + addr2->v4.sin_addr.s_addr) + return 1; + return 0; } - if (addr1->v6.sin6_port != addr2->v6.sin6_port) - return 0; + if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr)) return 0; + /* If this is a linklocal address, compare the scope_id. */ - if (ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) { - if (addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id && - (addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)) { - return 0; - } - } + if ((ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) && + addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id && + addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id) + return 0; return 1; } +/* Compare addresses exactly. + * v4-mapped-v6 is also in consideration. + */ +static int sctp_v6_cmp_addr(const union sctp_addr *addr1, + const union sctp_addr *addr2) +{ + return __sctp_v6_cmp_addr(addr1, addr2) && + addr1->v6.sin6_port == addr2->v6.sin6_port; +} + /* Initialize addr struct to INADDR_ANY. 
*/ static void sctp_v6_inaddr_any(union sctp_addr *addr, __be16 port) { @@ -875,8 +878,8 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1, const union sctp_addr *addr2, struct sctp_sock *opt) { - struct sctp_af *af1, *af2; struct sock *sk = sctp_opt2sk(opt); + struct sctp_af *af1, *af2; af1 = sctp_get_af_specific(addr1->sa.sa_family); af2 = sctp_get_af_specific(addr2->sa.sa_family); @@ -892,10 +895,10 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1, if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2)) return 1; - if (addr1->sa.sa_family != addr2->sa.sa_family) - return 0; + if (addr1->sa.sa_family == AF_INET && addr2->sa.sa_family == AF_INET) + return addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr; - return af1->cmp_addr(addr1, addr2); + return __sctp_v6_cmp_addr(addr1, addr2); } /* Verify that the provided sockaddr looks bindable. Common verification, diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 5a4fb1dc8400..e62addb60434 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -1152,7 +1152,7 @@ struct sctp_chunk *sctp_make_violation_max_retrans( const struct sctp_association *asoc, const struct sctp_chunk *chunk) { - static const char error[] = "Association exceeded its max_retans count"; + static const char error[] = "Association exceeded its max_retrans count"; size_t payload_len = sizeof(error) + sizeof(struct sctp_errhdr); struct sctp_chunk *retval; diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index dd0594a10961..c9ae3404b1bb 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -153,10 +153,7 @@ static enum sctp_disposition sctp_sf_violation_chunk( struct sctp_cmd_seq *commands); static enum sctp_ierror sctp_sf_authenticate( - struct net *net, - const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const union sctp_subtype type, struct sctp_chunk *chunk); static enum sctp_disposition __sctp_sf_do_9_1_abort( @@ -626,6 +623,38 @@ enum sctp_disposition sctp_sf_do_5_1C_ack(struct net *net, return SCTP_DISPOSITION_CONSUME; } +static bool sctp_auth_chunk_verify(struct net *net, struct sctp_chunk *chunk, + const struct sctp_association *asoc) +{ + struct sctp_chunk auth; + + if (!chunk->auth_chunk) + return true; + + /* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo + * is supposed to be authenticated and we have to do delayed + * authentication. We've just recreated the association using + * the information in the cookie and now it's much easier to + * do the authentication. + */ + + /* Make sure that we and the peer are AUTH capable */ + if (!net->sctp.auth_enable || !asoc->peer.auth_capable) + return false; + + /* set-up our fake chunk so that we can process it */ + auth.skb = chunk->auth_chunk; + auth.asoc = chunk->asoc; + auth.sctp_hdr = chunk->sctp_hdr; + auth.chunk_hdr = (struct sctp_chunkhdr *) + skb_push(chunk->auth_chunk, + sizeof(struct sctp_chunkhdr)); + skb_pull(chunk->auth_chunk, sizeof(struct sctp_chunkhdr)); + auth.transport = chunk->transport; + + return sctp_sf_authenticate(asoc, &auth) == SCTP_IERROR_NO_ERROR; +} + /* * Respond to a normal COOKIE ECHO chunk. * We are the side that is being asked for an association. @@ -763,37 +792,9 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net, if (error) goto nomem_init; - /* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo - * is supposed to be authenticated and we have to do delayed - * authentication. 
We've just recreated the association using - * the information in the cookie and now it's much easier to - * do the authentication. - */ - if (chunk->auth_chunk) { - struct sctp_chunk auth; - enum sctp_ierror ret; - - /* Make sure that we and the peer are AUTH capable */ - if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) { - sctp_association_free(new_asoc); - return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); - } - - /* set-up our fake chunk so that we can process it */ - auth.skb = chunk->auth_chunk; - auth.asoc = chunk->asoc; - auth.sctp_hdr = chunk->sctp_hdr; - auth.chunk_hdr = (struct sctp_chunkhdr *) - skb_push(chunk->auth_chunk, - sizeof(struct sctp_chunkhdr)); - skb_pull(chunk->auth_chunk, sizeof(struct sctp_chunkhdr)); - auth.transport = chunk->transport; - - ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth); - if (ret != SCTP_IERROR_NO_ERROR) { - sctp_association_free(new_asoc); - return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); - } + if (!sctp_auth_chunk_verify(net, chunk, new_asoc)) { + sctp_association_free(new_asoc); + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); } repl = sctp_make_cookie_ack(new_asoc, chunk); @@ -1794,13 +1795,18 @@ static enum sctp_disposition sctp_sf_do_dupcook_a( GFP_ATOMIC)) goto nomem; + if (sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC)) + goto nomem; + + if (!sctp_auth_chunk_verify(net, chunk, new_asoc)) + return SCTP_DISPOSITION_DISCARD; + /* Make sure no new addresses are being added during the * restart. Though this is a pretty complicated attack * since you'd have to get inside the cookie. */ - if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) { + if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) return SCTP_DISPOSITION_CONSUME; - } /* If the endpoint is in the SHUTDOWN-ACK-SENT state and recognizes * the peer has restarted (Action A), it MUST NOT setup a new @@ -1906,6 +1912,12 @@ static enum sctp_disposition sctp_sf_do_dupcook_b( GFP_ATOMIC)) goto nomem; + if (sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC)) + goto nomem; + + if (!sctp_auth_chunk_verify(net, chunk, new_asoc)) + return SCTP_DISPOSITION_DISCARD; + /* Update the content of current association. */ sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, @@ -2003,6 +2015,9 @@ static enum sctp_disposition sctp_sf_do_dupcook_d( * a COOKIE ACK. */ + if (!sctp_auth_chunk_verify(net, chunk, asoc)) + return SCTP_DISPOSITION_DISCARD; + /* Don't accidentally move back into established state. */ if (asoc->state < SCTP_STATE_ESTABLISHED) { sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, @@ -2050,7 +2065,7 @@ static enum sctp_disposition sctp_sf_do_dupcook_d( } } - repl = sctp_make_cookie_ack(new_asoc, chunk); + repl = sctp_make_cookie_ack(asoc, chunk); if (!repl) goto nomem; @@ -4165,10 +4180,7 @@ gen_shutdown: * The return value is the disposition of the chunk. 
*/ static enum sctp_ierror sctp_sf_authenticate( - struct net *net, - const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const union sctp_subtype type, struct sctp_chunk *chunk) { struct sctp_shared_key *sh_key = NULL; @@ -4269,7 +4281,7 @@ enum sctp_disposition sctp_sf_eat_auth(struct net *net, commands); auth_hdr = (struct sctp_authhdr *)chunk->skb->data; - error = sctp_sf_authenticate(net, ep, asoc, type, chunk); + error = sctp_sf_authenticate(asoc, chunk); switch (error) { case SCTP_IERROR_AUTH_BAD_HMAC: /* Generate the ERROR chunk and discard the rest diff --git a/net/sctp/stream.c b/net/sctp/stream.c index f799043abec9..f1f1d1b232ba 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -240,6 +240,8 @@ void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new) new->out = NULL; new->in = NULL; + new->outcnt = 0; + new->incnt = 0; } static int sctp_send_reconf(struct sctp_association *asoc, diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index 84207ad33e8e..8cb7d9858270 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c @@ -715,7 +715,6 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc, return event; fail_mark: - sctp_chunk_put(chunk); kfree_skb(skb); fail: return NULL; diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 5f8046c62d90..544bab42f925 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -292,6 +292,17 @@ static void smc_copy_sock_settings_to_smc(struct smc_sock *smc) smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC); } +/* register a new rmb */ +static int smc_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc) +{ + /* register memory region for new rmb */ + if (smc_wr_reg_send(link, rmb_desc->mr_rx[SMC_SINGLE_LINK])) { + rmb_desc->regerr = 1; + return -EFAULT; + } + return 0; +} + static int smc_clnt_conf_first_link(struct smc_sock *smc) { struct smc_link_group *lgr = smc->conn.lgr; @@ -321,9 +332,7 @@ static int smc_clnt_conf_first_link(struct smc_sock *smc) smc_wr_remember_qp_attr(link); - rc = smc_wr_reg_send(link, - smc->conn.rmb_desc->mr_rx[SMC_SINGLE_LINK]); - if (rc) + if (smc_reg_rmb(link, smc->conn.rmb_desc)) return SMC_CLC_DECL_INTERR; /* send CONFIRM LINK response over RoCE fabric */ @@ -473,13 +482,8 @@ static int smc_connect_rdma(struct smc_sock *smc) goto decline_rdma_unlock; } } else { - struct smc_buf_desc *buf_desc = smc->conn.rmb_desc; - - if (!buf_desc->reused) { - /* register memory region for new rmb */ - rc = smc_wr_reg_send(link, - buf_desc->mr_rx[SMC_SINGLE_LINK]); - if (rc) { + if (!smc->conn.rmb_desc->reused) { + if (smc_reg_rmb(link, smc->conn.rmb_desc)) { reason_code = SMC_CLC_DECL_INTERR; goto decline_rdma_unlock; } @@ -719,9 +723,7 @@ static int smc_serv_conf_first_link(struct smc_sock *smc) link = &lgr->lnk[SMC_SINGLE_LINK]; - rc = smc_wr_reg_send(link, - smc->conn.rmb_desc->mr_rx[SMC_SINGLE_LINK]); - if (rc) + if (smc_reg_rmb(link, smc->conn.rmb_desc)) return SMC_CLC_DECL_INTERR; /* send CONFIRM LINK request to client over the RoCE fabric */ @@ -854,13 +856,8 @@ static void smc_listen_work(struct work_struct *work) smc_rx_init(new_smc); if (local_contact != SMC_FIRST_CONTACT) { - struct smc_buf_desc *buf_desc = new_smc->conn.rmb_desc; - - if (!buf_desc->reused) { - /* register memory region for new rmb */ - rc = smc_wr_reg_send(link, - buf_desc->mr_rx[SMC_SINGLE_LINK]); - if (rc) { + if (!new_smc->conn.rmb_desc->reused) { + if (smc_reg_rmb(link, new_smc->conn.rmb_desc)) { reason_code = SMC_CLC_DECL_INTERR; goto 
decline_rdma_unlock; } @@ -978,10 +975,6 @@ static void smc_tcp_listen_work(struct work_struct *work) } out: - if (lsmc->clcsock) { - sock_release(lsmc->clcsock); - lsmc->clcsock = NULL; - } release_sock(lsk); sock_put(&lsmc->sk); /* sock_hold in smc_listen */ } @@ -1170,13 +1163,15 @@ static __poll_t smc_poll(struct file *file, struct socket *sock, /* delegate to CLC child sock */ release_sock(sk); mask = smc->clcsock->ops->poll(file, smc->clcsock, wait); - /* if non-blocking connect finished ... */ lock_sock(sk); - if ((sk->sk_state == SMC_INIT) && (mask & EPOLLOUT)) { - sk->sk_err = smc->clcsock->sk->sk_err; - if (sk->sk_err) { - mask |= EPOLLERR; - } else { + sk->sk_err = smc->clcsock->sk->sk_err; + if (sk->sk_err) { + mask |= EPOLLERR; + } else { + /* if non-blocking connect finished ... */ + if (sk->sk_state == SMC_INIT && + mask & EPOLLOUT && + smc->clcsock->sk->sk_state != TCP_CLOSE) { rc = smc_connect_rdma(smc); if (rc < 0) mask |= EPOLLERR; @@ -1259,14 +1254,12 @@ static int smc_shutdown(struct socket *sock, int how) rc = smc_close_shutdown_write(smc); break; case SHUT_RD: - if (sk->sk_state == SMC_LISTEN) - rc = smc_close_active(smc); - else - rc = 0; - /* nothing more to do because peer is not involved */ + rc = 0; + /* nothing more to do because peer is not involved */ break; } - rc1 = kernel_sock_shutdown(smc->clcsock, how); + if (smc->clcsock) + rc1 = kernel_sock_shutdown(smc->clcsock, how); /* map sock_shutdown_cmd constants to sk_shutdown value range */ sk->sk_shutdown |= how + 1; @@ -1322,8 +1315,11 @@ static ssize_t smc_sendpage(struct socket *sock, struct page *page, smc = smc_sk(sk); lock_sock(sk); - if (sk->sk_state != SMC_ACTIVE) + if (sk->sk_state != SMC_ACTIVE) { + release_sock(sk); goto out; + } + release_sock(sk); if (smc->use_fallback) rc = kernel_sendpage(smc->clcsock, page, offset, size, flags); @@ -1331,7 +1327,6 @@ static ssize_t smc_sendpage(struct socket *sock, struct page *page, rc = sock_no_sendpage(sock, page, offset, size, flags); out: - release_sock(sk); return rc; } diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index f44f6803f7ff..d4bd01bb44e1 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -32,6 +32,9 @@ static u32 smc_lgr_num; /* unique link group number */ +static void smc_buf_free(struct smc_buf_desc *buf_desc, struct smc_link *lnk, + bool is_rmb); + static void smc_lgr_schedule_free_work(struct smc_link_group *lgr) { /* client link group creation always follows the server link group @@ -234,9 +237,22 @@ static void smc_buf_unuse(struct smc_connection *conn) conn->sndbuf_size = 0; } if (conn->rmb_desc) { - conn->rmb_desc->reused = true; - conn->rmb_desc->used = 0; - conn->rmbe_size = 0; + if (!conn->rmb_desc->regerr) { + conn->rmb_desc->reused = 1; + conn->rmb_desc->used = 0; + conn->rmbe_size = 0; + } else { + /* buf registration failed, reuse not possible */ + struct smc_link_group *lgr = conn->lgr; + struct smc_link *lnk; + + write_lock_bh(&lgr->rmbs_lock); + list_del(&conn->rmb_desc->list); + write_unlock_bh(&lgr->rmbs_lock); + + lnk = &lgr->lnk[SMC_SINGLE_LINK]; + smc_buf_free(conn->rmb_desc, lnk, true); + } } } diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index 07e2a393e6d9..5dfcb15d529f 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -123,7 +123,8 @@ struct smc_buf_desc { */ u32 order; /* allocation order */ u32 used; /* currently used / unused */ - bool reused; /* new created / reused */ + u8 reused : 1; /* new created / reused */ + u8 regerr : 1; /* err during registration */ }; struct smc_rtoken { 
/* address/key of remote RMB */ diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c index b9283ce5cd85..092bebc70048 100644 --- a/net/strparser/strparser.c +++ b/net/strparser/strparser.c @@ -67,7 +67,7 @@ static void strp_abort_strp(struct strparser *strp, int err) static void strp_start_timer(struct strparser *strp, long timeo) { - if (timeo) + if (timeo && timeo != LONG_MAX) mod_delayed_work(strp_wq, &strp->msg_timer_work, timeo); } @@ -296,9 +296,9 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, strp_start_timer(strp, timeo); } + stm->accum_len += cand_len; strp->need_bytes = stm->strp.full_len - stm->accum_len; - stm->accum_len += cand_len; stm->early_eaten = cand_len; STRP_STATS_ADD(strp->stats.bytes, cand_len); desc->count = 0; /* Stop reading socket */ @@ -321,6 +321,7 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, /* Hurray, we have a new message! */ cancel_delayed_work(&strp->msg_timer_work); strp->skb_head = NULL; + strp->need_bytes = 0; STRP_STATS_INCR(strp->stats.msgs); /* Give skb to upper layer */ @@ -410,9 +411,7 @@ void strp_data_ready(struct strparser *strp) return; if (strp->need_bytes) { - if (strp_peek_len(strp) >= strp->need_bytes) - strp->need_bytes = 0; - else + if (strp_peek_len(strp) < strp->need_bytes) return; } diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 0f08934b2cea..c81ef5e6c981 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -1375,6 +1375,7 @@ rpc_gssd_dummy_depopulate(struct dentry *pipe_dentry) struct dentry *clnt_dir = pipe_dentry->d_parent; struct dentry *gssd_dir = clnt_dir->d_parent; + dget(pipe_dentry); __rpc_rmpipe(d_inode(clnt_dir), pipe_dentry); __rpc_depopulate(clnt_dir, gssd_dummy_info_file, 0, 1); __rpc_depopulate(gssd_dir, gssd_dummy_clnt_dir, 0, 1); diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c index 5cc68a824f45..f2f63959fddd 100644 --- a/net/sunrpc/xprtrdma/fmr_ops.c +++ b/net/sunrpc/xprtrdma/fmr_ops.c @@ -72,6 +72,7 @@ fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr) if (IS_ERR(mr->fmr.fm_mr)) goto out_fmr_err; + INIT_LIST_HEAD(&mr->mr_list); return 0; out_fmr_err: @@ -102,10 +103,6 @@ fmr_op_release_mr(struct rpcrdma_mr *mr) LIST_HEAD(unmap_list); int rc; - /* Ensure MW is not on any rl_registered list */ - if (!list_empty(&mr->mr_list)) - list_del(&mr->mr_list); - kfree(mr->fmr.fm_physaddrs); kfree(mr->mr_sg); diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index c5743a0960be..c59c5c788db0 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c @@ -110,6 +110,7 @@ frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr) if (!mr->mr_sg) goto out_list_err; + INIT_LIST_HEAD(&mr->mr_list); sg_init_table(mr->mr_sg, depth); init_completion(&frwr->fr_linv_done); return 0; @@ -133,10 +134,6 @@ frwr_op_release_mr(struct rpcrdma_mr *mr) { int rc; - /* Ensure MR is not on any rl_registered list */ - if (!list_empty(&mr->mr_list)) - list_del(&mr->mr_list); - rc = ib_dereg_mr(mr->frwr.fr_mr); if (rc) pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n", @@ -195,7 +192,7 @@ frwr_op_recover_mr(struct rpcrdma_mr *mr) return; out_release: - pr_err("rpcrdma: FRWR reset failed %d, %p release\n", rc, mr); + pr_err("rpcrdma: FRWR reset failed %d, %p released\n", rc, mr); r_xprt->rx_stats.mrs_orphaned++; spin_lock(&r_xprt->rx_buf.rb_mrlock); @@ -476,7 +473,7 @@ frwr_op_reminv(struct rpcrdma_rep *rep, struct list_head *mrs) list_for_each_entry(mr, 
mrs, mr_list) if (mr->mr_handle == rep->rr_inv_rkey) { - list_del(&mr->mr_list); + list_del_init(&mr->mr_list); trace_xprtrdma_remoteinv(mr); mr->frwr.fr_state = FRWR_IS_INVALID; rpcrdma_mr_unmap_and_put(mr); diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index fe5eaca2d197..c345d365af88 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -1254,6 +1254,11 @@ rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf) list_del(&mr->mr_all); spin_unlock(&buf->rb_mrlock); + + /* Ensure MW is not on any rl_registered list */ + if (!list_empty(&mr->mr_list)) + list_del(&mr->mr_list); + ia->ri_ops->ro_release_mr(mr); count++; spin_lock(&buf->rb_mrlock); diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index 3d3b423fa9c1..cb41b12a3bf8 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h @@ -380,7 +380,7 @@ rpcrdma_mr_pop(struct list_head *list) struct rpcrdma_mr *mr; mr = list_first_entry(list, struct rpcrdma_mr, mr_list); - list_del(&mr->mr_list); + list_del_init(&mr->mr_list); return mr; } diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c index 32dc33a94bc7..5453e564da82 100644 --- a/net/tipc/monitor.c +++ b/net/tipc/monitor.c @@ -777,7 +777,7 @@ int __tipc_nl_add_monitor(struct net *net, struct tipc_nl_msg *msg, ret = tipc_bearer_get_name(net, bearer_name, bearer_id); if (ret || !mon) - return -EINVAL; + return 0; hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, NLM_F_MULTI, TIPC_NL_MON_GET); diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index b1fe20972aa9..dd1c4fa2eb78 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -241,7 +241,8 @@ err: static struct publication *tipc_service_remove_publ(struct net *net, struct tipc_service *sc, u32 lower, u32 upper, - u32 node, u32 key) + u32 node, u32 key, + struct service_range **rng) { struct tipc_subscription *sub, *tmp; struct service_range *sr; @@ -275,19 +276,15 @@ static struct publication *tipc_service_remove_publ(struct net *net, list_del(&p->all_publ); list_del(&p->local_publ); - - /* Remove service range item if this was its last publication */ - if (list_empty(&sr->all_publ)) { + if (list_empty(&sr->all_publ)) last = true; - rb_erase(&sr->tree_node, &sc->ranges); - kfree(sr); - } /* Notify any waiting subscriptions */ list_for_each_entry_safe(sub, tmp, &sc->subscriptions, service_list) { tipc_sub_report_overlap(sub, p->lower, p->upper, TIPC_WITHDRAWN, p->port, p->node, p->scope, last); } + *rng = sr; return p; } @@ -379,13 +376,20 @@ struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type, u32 node, u32 key) { struct tipc_service *sc = tipc_service_find(net, type); + struct service_range *sr = NULL; struct publication *p = NULL; if (!sc) return NULL; spin_lock_bh(&sc->lock); - p = tipc_service_remove_publ(net, sc, lower, upper, node, key); + p = tipc_service_remove_publ(net, sc, lower, upper, node, key, &sr); + + /* Remove service range item if this was its last publication */ + if (sr && list_empty(&sr->all_publ)) { + rb_erase(&sr->tree_node, &sc->ranges); + kfree(sr); + } /* Delete service item if this no more publications and subscriptions */ if (RB_EMPTY_ROOT(&sc->ranges) && list_empty(&sc->subscriptions)) { @@ -665,13 +669,14 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, /** * tipc_nametbl_subscribe - add a subscription object to the name table */ -void tipc_nametbl_subscribe(struct tipc_subscription *sub) +bool tipc_nametbl_subscribe(struct 
tipc_subscription *sub) { struct name_table *nt = tipc_name_table(sub->net); struct tipc_net *tn = tipc_net(sub->net); struct tipc_subscr *s = &sub->evt.s; u32 type = tipc_sub_read(s, seq.type); struct tipc_service *sc; + bool res = true; spin_lock_bh(&tn->nametbl_lock); sc = tipc_service_find(sub->net, type); @@ -685,8 +690,10 @@ void tipc_nametbl_subscribe(struct tipc_subscription *sub) pr_warn("Failed to subscribe for {%u,%u,%u}\n", type, tipc_sub_read(s, seq.lower), tipc_sub_read(s, seq.upper)); + res = false; } spin_unlock_bh(&tn->nametbl_lock); + return res; } /** @@ -744,16 +751,17 @@ int tipc_nametbl_init(struct net *net) static void tipc_service_delete(struct net *net, struct tipc_service *sc) { struct service_range *sr, *tmpr; - struct publication *p, *tmpb; + struct publication *p, *tmp; spin_lock_bh(&sc->lock); rbtree_postorder_for_each_entry_safe(sr, tmpr, &sc->ranges, tree_node) { - list_for_each_entry_safe(p, tmpb, - &sr->all_publ, all_publ) { + list_for_each_entry_safe(p, tmp, &sr->all_publ, all_publ) { tipc_service_remove_publ(net, sc, p->lower, p->upper, - p->node, p->key); + p->node, p->key, &sr); kfree_rcu(p, rcu); } + rb_erase(&sr->tree_node, &sc->ranges); + kfree(sr); } hlist_del_init_rcu(&sc->service_list); spin_unlock_bh(&sc->lock); diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h index 4b14fc28d9e2..0febba41da86 100644 --- a/net/tipc/name_table.h +++ b/net/tipc/name_table.h @@ -126,7 +126,7 @@ struct publication *tipc_nametbl_insert_publ(struct net *net, u32 type, struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type, u32 lower, u32 upper, u32 node, u32 key); -void tipc_nametbl_subscribe(struct tipc_subscription *s); +bool tipc_nametbl_subscribe(struct tipc_subscription *s); void tipc_nametbl_unsubscribe(struct tipc_subscription *s); int tipc_nametbl_init(struct net *net); void tipc_nametbl_stop(struct net *net); diff --git a/net/tipc/net.c b/net/tipc/net.c index 856f9e97ea29..4fbaa0464405 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c @@ -252,6 +252,8 @@ int __tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info) u64 *w0 = (u64 *)&node_id[0]; u64 *w1 = (u64 *)&node_id[8]; + if (!attrs[TIPC_NLA_NET_NODEID_W1]) + return -EINVAL; *w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]); *w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]); tipc_net_init(net, node_id, 0); diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c index b76f13f6fea1..6ff2254088f6 100644 --- a/net/tipc/netlink.c +++ b/net/tipc/netlink.c @@ -79,7 +79,10 @@ const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = { const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = { [TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC }, - [TIPC_NLA_NET_ID] = { .type = NLA_U32 } + [TIPC_NLA_NET_ID] = { .type = NLA_U32 }, + [TIPC_NLA_NET_ADDR] = { .type = NLA_U32 }, + [TIPC_NLA_NET_NODEID] = { .type = NLA_U64 }, + [TIPC_NLA_NET_NODEID_W1] = { .type = NLA_U64 }, }; const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = { diff --git a/net/tipc/node.c b/net/tipc/node.c index c77dd2f3c589..f29549de9245 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -1950,6 +1950,7 @@ out: int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info) { struct net *net = genl_info_net(info); + struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1]; struct tipc_nl_msg msg; char *name; int err; @@ -1957,9 +1958,19 @@ int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info) msg.portid = info->snd_portid; msg.seq = info->snd_seq; - if 
(!info->attrs[TIPC_NLA_LINK_NAME]) + if (!info->attrs[TIPC_NLA_LINK]) return -EINVAL; - name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]); + + err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX, + info->attrs[TIPC_NLA_LINK], + tipc_nl_link_policy, info->extack); + if (err) + return err; + + if (!attrs[TIPC_NLA_LINK_NAME]) + return -EINVAL; + + name = nla_data(attrs[TIPC_NLA_LINK_NAME]); msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); if (!msg.skb) @@ -2232,8 +2243,8 @@ int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb) struct net *net = sock_net(skb->sk); u32 prev_bearer = cb->args[0]; struct tipc_nl_msg msg; + int bearer_id; int err; - int i; if (prev_bearer == MAX_BEARERS) return 0; @@ -2243,16 +2254,13 @@ int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb) msg.seq = cb->nlh->nlmsg_seq; rtnl_lock(); - for (i = prev_bearer; i < MAX_BEARERS; i++) { - prev_bearer = i; - err = __tipc_nl_add_monitor(net, &msg, prev_bearer); + for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) { + err = __tipc_nl_add_monitor(net, &msg, bearer_id); if (err) - goto out; + break; } - -out: rtnl_unlock(); - cb->args[0] = prev_bearer; + cb->args[0] = bearer_id; return skb->len; } diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 1fd1c8b5ce03..6be21575503a 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -1278,7 +1278,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen) struct tipc_msg *hdr = &tsk->phdr; struct tipc_name_seq *seq; struct sk_buff_head pkts; - u32 dnode, dport; + u32 dport, dnode = 0; u32 type, inst; int mtu, rc; @@ -1348,6 +1348,8 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen) msg_set_destnode(hdr, dnode); msg_set_destport(hdr, dest->addr.id.ref); msg_set_hdr_sz(hdr, BASIC_H_SIZE); + } else { + return -EINVAL; } /* Block or return if destination link is congested */ @@ -1514,10 +1516,10 @@ static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb) srcaddr->sock.family = AF_TIPC; srcaddr->sock.addrtype = TIPC_ADDR_ID; + srcaddr->sock.scope = 0; srcaddr->sock.addr.id.ref = msg_origport(hdr); srcaddr->sock.addr.id.node = msg_orignode(hdr); srcaddr->sock.addr.name.domain = 0; - srcaddr->sock.scope = 0; m->msg_namelen = sizeof(struct sockaddr_tipc); if (!msg_in_group(hdr)) @@ -1526,6 +1528,7 @@ static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb) /* Group message users may also want to know sending member's id */ srcaddr->member.family = AF_TIPC; srcaddr->member.addrtype = TIPC_ADDR_NAME; + srcaddr->member.scope = 0; srcaddr->member.addr.name.name.type = msg_nametype(hdr); srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member; srcaddr->member.addr.name.domain = 0; diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index b7d80bc5f4ab..f340e53da625 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c @@ -153,7 +153,10 @@ struct tipc_subscription *tipc_sub_subscribe(struct net *net, memcpy(&sub->evt.s, s, sizeof(*s)); spin_lock_init(&sub->lock); kref_init(&sub->kref); - tipc_nametbl_subscribe(sub); + if (!tipc_nametbl_subscribe(sub)) { + kfree(sub); + return NULL; + } timer_setup(&sub->timer, tipc_sub_timeout, 0); timeout = tipc_sub_read(&sub->evt.s, timeout); if (timeout != TIPC_WAIT_FOREVER) diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 0d379970960e..20cd93be6236 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -114,6 +114,7 @@ int tls_push_sg(struct sock *sk, size = sg->length - 
offset; offset += sg->offset; + ctx->in_tcp_sendpages = true; while (1) { if (sg_is_last(sg)) sendpage_flags = flags; @@ -134,6 +135,7 @@ retry: offset -= sg->offset; ctx->partially_sent_offset = offset; ctx->partially_sent_record = (void *)sg; + ctx->in_tcp_sendpages = false; return ret; } @@ -148,6 +150,8 @@ retry: } clear_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags); + ctx->in_tcp_sendpages = false; + ctx->sk_write_space(sk); return 0; } @@ -217,6 +221,10 @@ static void tls_write_space(struct sock *sk) { struct tls_context *ctx = tls_get_ctx(sk); + /* We are already sending pages, ignore notification */ + if (ctx->in_tcp_sendpages) + return; + if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) { gfp_t sk_allocation = sk->sk_allocation; int rc; @@ -241,16 +249,13 @@ static void tls_sk_proto_close(struct sock *sk, long timeout) struct tls_context *ctx = tls_get_ctx(sk); long timeo = sock_sndtimeo(sk, 0); void (*sk_proto_close)(struct sock *sk, long timeout); + bool free_ctx = false; lock_sock(sk); sk_proto_close = ctx->sk_proto_close; - if (ctx->conf == TLS_HW_RECORD) - goto skip_tx_cleanup; - - if (ctx->conf == TLS_BASE) { - kfree(ctx); - ctx = NULL; + if (ctx->conf == TLS_BASE || ctx->conf == TLS_HW_RECORD) { + free_ctx = true; goto skip_tx_cleanup; } @@ -287,7 +292,7 @@ skip_tx_cleanup: /* free ctx for TLS_HW_RECORD, used by tcp_set_state * for sk->sk_prot->unhash [tls_hw_unhash] */ - if (ctx && ctx->conf == TLS_HW_RECORD) + if (free_ctx) kfree(ctx); } diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 4dc766b03f00..71e79597f940 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -41,6 +41,8 @@ #include <net/strparser.h> #include <net/tls.h> +#define MAX_IV_SIZE TLS_CIPHER_AES_GCM_128_IV_SIZE + static int tls_do_decryption(struct sock *sk, struct scatterlist *sgin, struct scatterlist *sgout, @@ -673,7 +675,7 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb, { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx); - char iv[TLS_CIPHER_AES_GCM_128_SALT_SIZE + tls_ctx->rx.iv_size]; + char iv[TLS_CIPHER_AES_GCM_128_SALT_SIZE + MAX_IV_SIZE]; struct scatterlist sgin_arr[MAX_SKB_FRAGS + 2]; struct scatterlist *sgin = &sgin_arr[0]; struct strp_msg *rxm = strp_msg(skb); @@ -1094,6 +1096,12 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) goto free_priv; } + /* Sanity-check the IV size for stack allocations. */ + if (iv_size > MAX_IV_SIZE) { + rc = -EINVAL; + goto free_priv; + } + cctx->prepend_size = TLS_HEADER_SIZE + nonce_size; cctx->tag_size = tag_size; cctx->overhead_size = cctx->prepend_size + cctx->tag_size; diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index aac9b8f6552e..c1076c19b858 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -2018,7 +2018,13 @@ const struct vsock_transport *vsock_core_get_transport(void) } EXPORT_SYMBOL_GPL(vsock_core_get_transport); +static void __exit vsock_exit(void) +{ + /* Do nothing. This function makes this module removable. 
*/ +} + module_init(vsock_init_tables); +module_exit(vsock_exit); MODULE_AUTHOR("VMware, Inc."); MODULE_DESCRIPTION("VMware Virtual Socket Family"); diff --git a/net/wireless/core.c b/net/wireless/core.c index a6f3cac8c640..c0fd8a85e7f7 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -95,6 +95,9 @@ static int cfg80211_dev_check_name(struct cfg80211_registered_device *rdev, ASSERT_RTNL(); + if (strlen(newname) > NL80211_WIPHY_NAME_MAXLEN) + return -EINVAL; + /* prohibit calling the thing phy%d when %d is not its number */ sscanf(newname, PHY_NAME "%d%n", &wiphy_idx, &taken); if (taken == strlen(newname) && wiphy_idx != rdev->wiphy_idx) { diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index ff28f8feeb09..a052693c2e85 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -9214,6 +9214,7 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) if (nla_get_flag(info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT])) { if (!info->attrs[NL80211_ATTR_SOCKET_OWNER]) { + kzfree(connkeys); GENL_SET_ERR_MSG(info, "external auth requires connection ownership"); return -EINVAL; diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 16c7e4ef5820..ac3e12c32aa3 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -1026,6 +1026,7 @@ static int regdb_query_country(const struct fwdb_header *db, if (!tmp_rd) { kfree(regdom); + kfree(wmm_ptrs); return -ENOMEM; } regdom = tmp_rd; diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index f9d2f2233f09..6c177ae7a6d9 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -2175,6 +2175,12 @@ struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family) return afinfo; } +void xfrm_flush_gc(void) +{ + flush_work(&xfrm_state_gc_work); +} +EXPORT_SYMBOL(xfrm_flush_gc); + /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */ void xfrm_state_delete_tunnel(struct xfrm_state *x) { diff --git a/samples/livepatch/livepatch-shadow-fix1.c b/samples/livepatch/livepatch-shadow-fix1.c index 830c55514f9f..49b13553eaae 100644 --- a/samples/livepatch/livepatch-shadow-fix1.c +++ b/samples/livepatch/livepatch-shadow-fix1.c @@ -56,6 +56,21 @@ struct dummy { unsigned long jiffies_expire; }; +/* + * The constructor makes more sense together with klp_shadow_get_or_alloc(). + * In this example, it would be safe to assign the pointer also to the shadow + * variable returned by klp_shadow_alloc(). But we wanted to show the more + * complicated use of the API. + */ +static int shadow_leak_ctor(void *obj, void *shadow_data, void *ctor_data) +{ + void **shadow_leak = shadow_data; + void *leak = ctor_data; + + *shadow_leak = leak; + return 0; +} + struct dummy *livepatch_fix1_dummy_alloc(void) { struct dummy *d; @@ -74,7 +89,8 @@ struct dummy *livepatch_fix1_dummy_alloc(void) * pointer to handle resource release. 
*/ leak = kzalloc(sizeof(int), GFP_KERNEL); - klp_shadow_alloc(d, SV_LEAK, &leak, sizeof(leak), GFP_KERNEL); + klp_shadow_alloc(d, SV_LEAK, sizeof(leak), GFP_KERNEL, + shadow_leak_ctor, leak); pr_info("%s: dummy @ %p, expires @ %lx\n", __func__, d, d->jiffies_expire); @@ -82,9 +98,19 @@ struct dummy *livepatch_fix1_dummy_alloc(void) return d; } +static void livepatch_fix1_dummy_leak_dtor(void *obj, void *shadow_data) +{ + void *d = obj; + void **shadow_leak = shadow_data; + + kfree(*shadow_leak); + pr_info("%s: dummy @ %p, prevented leak @ %p\n", + __func__, d, *shadow_leak); +} + void livepatch_fix1_dummy_free(struct dummy *d) { - void **shadow_leak, *leak; + void **shadow_leak; /* * Patch: fetch the saved SV_LEAK shadow variable, detach and @@ -93,15 +119,10 @@ void livepatch_fix1_dummy_free(struct dummy *d) * was loaded.) */ shadow_leak = klp_shadow_get(d, SV_LEAK); - if (shadow_leak) { - leak = *shadow_leak; - klp_shadow_free(d, SV_LEAK); - kfree(leak); - pr_info("%s: dummy @ %p, prevented leak @ %p\n", - __func__, d, leak); - } else { + if (shadow_leak) + klp_shadow_free(d, SV_LEAK, livepatch_fix1_dummy_leak_dtor); + else pr_info("%s: dummy @ %p leaked!\n", __func__, d); - } kfree(d); } @@ -147,7 +168,7 @@ static int livepatch_shadow_fix1_init(void) static void livepatch_shadow_fix1_exit(void) { /* Cleanup any existing SV_LEAK shadow variables */ - klp_shadow_free_all(SV_LEAK); + klp_shadow_free_all(SV_LEAK, livepatch_fix1_dummy_leak_dtor); WARN_ON(klp_unregister_patch(&patch)); } diff --git a/samples/livepatch/livepatch-shadow-fix2.c b/samples/livepatch/livepatch-shadow-fix2.c index ff9948f0ec00..b34c7bf83356 100644 --- a/samples/livepatch/livepatch-shadow-fix2.c +++ b/samples/livepatch/livepatch-shadow-fix2.c @@ -53,39 +53,42 @@ struct dummy { bool livepatch_fix2_dummy_check(struct dummy *d, unsigned long jiffies) { int *shadow_count; - int count; /* * Patch: handle in-flight dummy structures, if they do not * already have a SV_COUNTER shadow variable, then attach a * new one. */ - count = 0; shadow_count = klp_shadow_get_or_alloc(d, SV_COUNTER, - &count, sizeof(count), - GFP_NOWAIT); + sizeof(*shadow_count), GFP_NOWAIT, + NULL, NULL); if (shadow_count) *shadow_count += 1; return time_after(jiffies, d->jiffies_expire); } +static void livepatch_fix2_dummy_leak_dtor(void *obj, void *shadow_data) +{ + void *d = obj; + void **shadow_leak = shadow_data; + + kfree(*shadow_leak); + pr_info("%s: dummy @ %p, prevented leak @ %p\n", + __func__, d, *shadow_leak); +} + void livepatch_fix2_dummy_free(struct dummy *d) { - void **shadow_leak, *leak; + void **shadow_leak; int *shadow_count; /* Patch: copy the memory leak patch from the fix1 module. 
*/ shadow_leak = klp_shadow_get(d, SV_LEAK); - if (shadow_leak) { - leak = *shadow_leak; - klp_shadow_free(d, SV_LEAK); - kfree(leak); - pr_info("%s: dummy @ %p, prevented leak @ %p\n", - __func__, d, leak); - } else { + if (shadow_leak) + klp_shadow_free(d, SV_LEAK, livepatch_fix2_dummy_leak_dtor); + else pr_info("%s: dummy @ %p leaked!\n", __func__, d); - } /* * Patch: fetch the SV_COUNTER shadow variable and display @@ -95,7 +98,7 @@ void livepatch_fix2_dummy_free(struct dummy *d) if (shadow_count) { pr_info("%s: dummy @ %p, check counter = %d\n", __func__, d, *shadow_count); - klp_shadow_free(d, SV_COUNTER); + klp_shadow_free(d, SV_COUNTER, NULL); } kfree(d); @@ -142,7 +145,7 @@ static int livepatch_shadow_fix2_init(void) static void livepatch_shadow_fix2_exit(void) { /* Cleanup any existing SV_COUNTER shadow variables */ - klp_shadow_free_all(SV_COUNTER); + klp_shadow_free_all(SV_COUNTER, NULL); WARN_ON(klp_unregister_patch(&patch)); } diff --git a/samples/sockmap/Makefile b/samples/sockmap/Makefile index 9bf2881bd11b..fa53f4d77834 100644 --- a/samples/sockmap/Makefile +++ b/samples/sockmap/Makefile @@ -65,11 +65,14 @@ $(src)/*.c: verify_target_bpf # asm/sysreg.h - inline assembly used by it is incompatible with llvm. # But, there is no easy way to fix it, so just exclude it since it is # useless for BPF samples. +# +# -target bpf option required with SK_MSG programs, this is to ensure +# reading 'void *' data types for data and data_end are __u64 reads. $(obj)/%.o: $(src)/%.c $(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) -I$(obj) \ -D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \ -Wno-compare-distinct-pointer-types \ -Wno-gnu-variable-sized-type-not-at-end \ -Wno-address-of-packed-member -Wno-tautological-compare \ - -Wno-unknown-warning-option \ - -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@ + -Wno-unknown-warning-option -O2 -target bpf \ + -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@ diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins index b2a95af7df18..7f5c86246138 100644 --- a/scripts/Makefile.gcc-plugins +++ b/scripts/Makefile.gcc-plugins @@ -14,7 +14,7 @@ ifdef CONFIG_GCC_PLUGINS endif ifdef CONFIG_GCC_PLUGIN_SANCOV - ifeq ($(CFLAGS_KCOV),) + ifeq ($(strip $(CFLAGS_KCOV)),) # It is needed because of the gcc-plugin.sh and gcc version checks. 
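The samples/sockmap Makefile comment above explains why SK_MSG programs must be built with '-target bpf': struct sk_msg_md exposes data and data_end as pointers, and only the bpf target guarantees they are loaded as 64-bit values regardless of the host architecture. A minimal sketch of such a program, illustrative only and not part of this patch (the SEC() macro is assumed to come from the samples' bpf_helpers.h):

#include <linux/bpf.h>
#include "bpf_helpers.h"	/* assumed: provides the SEC() section macro */

SEC("sk_msg")
int msg_prog(struct sk_msg_md *msg)
{
	void *data = (void *)(long)msg->data;
	void *data_end = (void *)(long)msg->data_end;

	/* pointer bounds check; the verifier expects 64-bit pointer loads here */
	if (data + 4 > data_end)
		return SK_DROP;
	return SK_PASS;
}

Compiled with clang -O2 -target bpf, as the Makefile change above now does, the data/data_end accesses are emitted as 8-byte reads and pass verification even when the build host is 32-bit.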
gcc-plugin-$(CONFIG_GCC_PLUGIN_SANCOV) += sancov_plugin.so diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 07d07409f16f..5af34a2b0cd9 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -196,7 +196,7 @@ $(obj)/%.tab.c: $(src)/%.y FORCE $(call if_changed,bison) quiet_cmd_bison_h = YACC $@ - cmd_bison_h = bison -o/dev/null --defines=$@ -t -l $< + cmd_bison_h = $(YACC) -o/dev/null --defines=$@ -t -l $< $(obj)/%.tab.h: $(src)/%.y FORCE $(call if_changed,bison_h) diff --git a/scripts/dtc/checks.c b/scripts/dtc/checks.c index c07ba4da9e36..815eaf140ab5 100644 --- a/scripts/dtc/checks.c +++ b/scripts/dtc/checks.c @@ -787,10 +787,9 @@ static void check_pci_bridge(struct check *c, struct dt_info *dti, struct node * FAIL(c, dti, node, "incorrect #size-cells for PCI bridge"); prop = get_property(node, "bus-range"); - if (!prop) { - FAIL(c, dti, node, "missing bus-range for PCI bridge"); + if (!prop) return; - } + if (prop->val.len != (sizeof(cell_t) * 2)) { FAIL_PROP(c, dti, node, prop, "value must be 2 cells"); return; diff --git a/scripts/extract_xc3028.pl b/scripts/extract_xc3028.pl index 61d9b256c658..a1c51b7e4baf 100755 --- a/scripts/extract_xc3028.pl +++ b/scripts/extract_xc3028.pl @@ -1,6 +1,6 @@ #!/usr/bin/env perl -# Copyright (c) Mauro Carvalho Chehab <mchehab@infradead.org> +# Copyright (c) Mauro Carvalho Chehab <mchehab@kernel.org> # Released under GPLv2 # # In order to use, you need to: diff --git a/scripts/faddr2line b/scripts/faddr2line index 9e5735a4d3a5..1876a741087c 100755 --- a/scripts/faddr2line +++ b/scripts/faddr2line @@ -170,7 +170,10 @@ __faddr2line() { echo "$file_lines" | while read -r line do echo $line - eval $(echo $line | awk -F "[ :]" '{printf("n1=%d;n2=%d;f=%s",$NF-5, $NF+5, $(NF-1))}') + n=$(echo $line | sed 's/.*:\([0-9]\+\).*/\1/g') + n1=$[$n-5] + n2=$[$n+5] + f=$(echo $line | sed 's/.*at \(.\+\):.*/\1/g') awk 'NR>=strtonum("'$n1'") && NR<=strtonum("'$n2'") {printf("%d\t%s\n", NR, $0)}' $f done diff --git a/scripts/genksyms/Makefile b/scripts/genksyms/Makefile index ef0287e42957..03b7ce97de14 100644 --- a/scripts/genksyms/Makefile +++ b/scripts/genksyms/Makefile @@ -14,14 +14,14 @@ genksyms-objs := genksyms.o parse.tab.o lex.lex.o # so that 'bison: not found' will be displayed if it is missing. ifeq ($(findstring 1,$(KBUILD_ENABLE_EXTRA_GCC_CHECKS)),) -quiet_cmd_bison_no_warn = $(quet_cmd_bison) +quiet_cmd_bison_no_warn = $(quiet_cmd_bison) cmd_bison_no_warn = $(YACC) --version >/dev/null; \ $(cmd_bison) 2>/dev/null $(obj)/parse.tab.c: $(src)/parse.y FORCE $(call if_changed,bison_no_warn) -quiet_cmd_bison_h_no_warn = $(quet_cmd_bison_h) +quiet_cmd_bison_h_no_warn = $(quiet_cmd_bison_h) cmd_bison_h_no_warn = $(YACC) --version >/dev/null; \ $(cmd_bison_h) 2>/dev/null diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c index 944418da9fe3..0f6dcb4011a8 100644 --- a/scripts/mod/sumversion.c +++ b/scripts/mod/sumversion.c @@ -330,14 +330,7 @@ static int parse_source_files(const char *objfile, struct md4_ctx *md) goto out; } - /* There will be a line like so: - deps_drivers/net/dummy.o := \ - drivers/net/dummy.c \ - $(wildcard include/config/net/fastroute.h) \ - include/linux/module.h \ - - Sum all files in the same dir or subdirs. - */ + /* Sum all files in the same dir or subdirs. 
*/ while ((line = get_next_line(&pos, file, flen)) != NULL) { char* p = line; diff --git a/scripts/split-man.pl b/scripts/split-man.pl index bfe16cbe42df..c3db607ee9ec 100755 --- a/scripts/split-man.pl +++ b/scripts/split-man.pl @@ -1,7 +1,7 @@ #!/usr/bin/perl # SPDX-License-Identifier: GPL-2.0 # -# Author: Mauro Carvalho Chehab <mchehab@s-opensource.com> +# Author: Mauro Carvalho Chehab <mchehab+samsung@kernel.org> # # Produce manpages from kernel-doc. # See Documentation/doc-guide/kernel-doc.rst for instructions diff --git a/security/commoncap.c b/security/commoncap.c index 48620c93d697..1ce701fcb3f3 100644 --- a/security/commoncap.c +++ b/security/commoncap.c @@ -449,6 +449,8 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer, magic |= VFS_CAP_FLAGS_EFFECTIVE; memcpy(&cap->data, &nscap->data, sizeof(__le32) * 2 * VFS_CAP_U32); cap->magic_etc = cpu_to_le32(magic); + } else { + size = -ENOMEM; } } kfree(tmpbuf); diff --git a/sound/core/control.c b/sound/core/control.c index 69734b0eafd0..9aa15bfc7936 100644 --- a/sound/core/control.c +++ b/sound/core/control.c @@ -1492,7 +1492,7 @@ static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file, int op_flag) { struct snd_ctl_tlv header; - unsigned int *container; + unsigned int __user *container; unsigned int container_size; struct snd_kcontrol *kctl; struct snd_ctl_elem_id id; diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c index b719d0bd833e..6491afbb5fd5 100644 --- a/sound/core/pcm_compat.c +++ b/sound/core/pcm_compat.c @@ -27,10 +27,11 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream, s32 __user *src) { snd_pcm_sframes_t delay; + int err; - delay = snd_pcm_delay(substream); - if (delay < 0) - return delay; + err = snd_pcm_delay(substream, &delay); + if (err) + return err; if (put_user(delay, src)) return -EFAULT; return 0; @@ -422,6 +423,8 @@ static int snd_pcm_ioctl_xfern_compat(struct snd_pcm_substream *substream, return -ENOTTY; if (substream->stream != dir) return -EINVAL; + if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) + return -EBADFD; if ((ch = substream->runtime->channels) > 128) return -EINVAL; diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 35ffccea94c3..0e875d5a9e86 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -2692,7 +2692,8 @@ static int snd_pcm_hwsync(struct snd_pcm_substream *substream) return err; } -static snd_pcm_sframes_t snd_pcm_delay(struct snd_pcm_substream *substream) +static int snd_pcm_delay(struct snd_pcm_substream *substream, + snd_pcm_sframes_t *delay) { struct snd_pcm_runtime *runtime = substream->runtime; int err; @@ -2708,7 +2709,9 @@ static snd_pcm_sframes_t snd_pcm_delay(struct snd_pcm_substream *substream) n += runtime->delay; } snd_pcm_stream_unlock_irq(substream); - return err < 0 ? 
err : n; + if (!err) + *delay = n; + return err; } static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream, @@ -2751,6 +2754,7 @@ static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream, sync_ptr.s.status.hw_ptr = status->hw_ptr; sync_ptr.s.status.tstamp = status->tstamp; sync_ptr.s.status.suspended_state = status->suspended_state; + sync_ptr.s.status.audio_tstamp = status->audio_tstamp; snd_pcm_stream_unlock_irq(substream); if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr))) return -EFAULT; @@ -2916,11 +2920,13 @@ static int snd_pcm_common_ioctl(struct file *file, return snd_pcm_hwsync(substream); case SNDRV_PCM_IOCTL_DELAY: { - snd_pcm_sframes_t delay = snd_pcm_delay(substream); + snd_pcm_sframes_t delay; snd_pcm_sframes_t __user *res = arg; + int err; - if (delay < 0) - return delay; + err = snd_pcm_delay(substream, &delay); + if (err) + return err; if (put_user(delay, res)) return -EFAULT; return 0; @@ -3008,13 +3014,7 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream, case SNDRV_PCM_IOCTL_DROP: return snd_pcm_drop(substream); case SNDRV_PCM_IOCTL_DELAY: - { - result = snd_pcm_delay(substream); - if (result < 0) - return result; - *frames = result; - return 0; - } + return snd_pcm_delay(substream, frames); default: return -EINVAL; } @@ -3234,7 +3234,7 @@ static __poll_t snd_pcm_capture_poll(struct file *file, poll_table * wait) /* * mmap status record */ -static int snd_pcm_mmap_status_fault(struct vm_fault *vmf) +static vm_fault_t snd_pcm_mmap_status_fault(struct vm_fault *vmf) { struct snd_pcm_substream *substream = vmf->vma->vm_private_data; struct snd_pcm_runtime *runtime; @@ -3270,7 +3270,7 @@ static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file /* * mmap control record */ -static int snd_pcm_mmap_control_fault(struct vm_fault *vmf) +static vm_fault_t snd_pcm_mmap_control_fault(struct vm_fault *vmf) { struct snd_pcm_substream *substream = vmf->vma->vm_private_data; struct snd_pcm_runtime *runtime; @@ -3359,7 +3359,7 @@ snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs) /* * fault callback for mmapping a RAM page */ -static int snd_pcm_mmap_data_fault(struct vm_fault *vmf) +static vm_fault_t snd_pcm_mmap_data_fault(struct vm_fault *vmf) { struct snd_pcm_substream *substream = vmf->vma->vm_private_data; struct snd_pcm_runtime *runtime; diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c index f69764d7cdd7..e30e30ba6e39 100644 --- a/sound/core/rawmidi_compat.c +++ b/sound/core/rawmidi_compat.c @@ -36,8 +36,6 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile, struct snd_rawmidi_params params; unsigned int val; - if (rfile->output == NULL) - return -EINVAL; if (get_user(params.stream, &src->stream) || get_user(params.buffer_size, &src->buffer_size) || get_user(params.avail_min, &src->avail_min) || @@ -46,8 +44,12 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile, params.no_active_sensing = val; switch (params.stream) { case SNDRV_RAWMIDI_STREAM_OUTPUT: + if (!rfile->output) + return -EINVAL; return snd_rawmidi_output_params(rfile->output, ¶ms); case SNDRV_RAWMIDI_STREAM_INPUT: + if (!rfile->input) + return -EINVAL; return snd_rawmidi_input_params(rfile->input, ¶ms); } return -EINVAL; @@ -67,16 +69,18 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile, int err; struct snd_rawmidi_status status; - if (rfile->output == NULL) - return -EINVAL; if (get_user(status.stream, &src->stream)) return -EFAULT; 
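/*
 * Illustrative aside, not part of the patch: the ALSA hunks that follow
 * (seq_oss, opl3, asihpi, hda_hwdep, hdspm, rme9652) all apply the same
 * <linux/nospec.h> idiom -- bounds-check an index that came from user
 * space, then clamp it with array_index_nospec() so a mispredicted branch
 * cannot be exploited to read out of bounds speculatively. A minimal
 * sketch of the idiom with made-up names ('table', 'nr_entries'):
 */
#include <linux/errno.h>
#include <linux/nospec.h>

static int lookup_entry(const int *table, unsigned int nr_entries,
			unsigned int idx)
{
	if (idx >= nr_entries)
		return -EINVAL;
	/* clamps idx to [0, nr_entries) even under speculative execution */
	idx = array_index_nospec(idx, nr_entries);
	return table[idx];
}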
switch (status.stream) { case SNDRV_RAWMIDI_STREAM_OUTPUT: + if (!rfile->output) + return -EINVAL; err = snd_rawmidi_output_status(rfile->output, &status); break; case SNDRV_RAWMIDI_STREAM_INPUT: + if (!rfile->input) + return -EINVAL; err = snd_rawmidi_input_status(rfile->input, &status); break; default: @@ -112,16 +116,18 @@ static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile, int err; struct snd_rawmidi_status status; - if (rfile->output == NULL) - return -EINVAL; if (get_user(status.stream, &src->stream)) return -EFAULT; switch (status.stream) { case SNDRV_RAWMIDI_STREAM_OUTPUT: + if (!rfile->output) + return -EINVAL; err = snd_rawmidi_output_status(rfile->output, &status); break; case SNDRV_RAWMIDI_STREAM_INPUT: + if (!rfile->input) + return -EINVAL; err = snd_rawmidi_input_status(rfile->input, &status); break; default: diff --git a/sound/core/seq/oss/seq_oss_event.c b/sound/core/seq/oss/seq_oss_event.c index c3908862bc8b..86ca584c27b2 100644 --- a/sound/core/seq/oss/seq_oss_event.c +++ b/sound/core/seq/oss/seq_oss_event.c @@ -26,6 +26,7 @@ #include <sound/seq_oss_legacy.h> #include "seq_oss_readq.h" #include "seq_oss_writeq.h" +#include <linux/nospec.h> /* @@ -287,10 +288,10 @@ note_on_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, st { struct seq_oss_synthinfo *info; - if (!snd_seq_oss_synth_is_valid(dp, dev)) + info = snd_seq_oss_synth_info(dp, dev); + if (!info) return -ENXIO; - info = &dp->synths[dev]; switch (info->arg.event_passing) { case SNDRV_SEQ_OSS_PROCESS_EVENTS: if (! info->ch || ch < 0 || ch >= info->nr_voices) { @@ -298,6 +299,7 @@ note_on_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, st return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev); } + ch = array_index_nospec(ch, info->nr_voices); if (note == 255 && info->ch[ch].note >= 0) { /* volume control */ int type; @@ -347,10 +349,10 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s { struct seq_oss_synthinfo *info; - if (!snd_seq_oss_synth_is_valid(dp, dev)) + info = snd_seq_oss_synth_info(dp, dev); + if (!info) return -ENXIO; - info = &dp->synths[dev]; switch (info->arg.event_passing) { case SNDRV_SEQ_OSS_PROCESS_EVENTS: if (! info->ch || ch < 0 || ch >= info->nr_voices) { @@ -358,6 +360,7 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev); } + ch = array_index_nospec(ch, info->nr_voices); if (info->ch[ch].note >= 0) { note = info->ch[ch].note; info->ch[ch].vel = 0; @@ -381,7 +384,7 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s static int set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note, int vel, struct snd_seq_event *ev) { - if (! snd_seq_oss_synth_is_valid(dp, dev)) + if (!snd_seq_oss_synth_info(dp, dev)) return -ENXIO; ev->type = type; @@ -399,7 +402,7 @@ set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note, static int set_control_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int param, int val, struct snd_seq_event *ev) { - if (! 
snd_seq_oss_synth_is_valid(dp, dev)) + if (!snd_seq_oss_synth_info(dp, dev)) return -ENXIO; ev->type = type; diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c index b30b2139e3f0..9debd1b8fd28 100644 --- a/sound/core/seq/oss/seq_oss_midi.c +++ b/sound/core/seq/oss/seq_oss_midi.c @@ -29,6 +29,7 @@ #include "../seq_lock.h" #include <linux/init.h> #include <linux/slab.h> +#include <linux/nospec.h> /* @@ -315,6 +316,7 @@ get_mididev(struct seq_oss_devinfo *dp, int dev) { if (dev < 0 || dev >= dp->max_mididev) return NULL; + dev = array_index_nospec(dev, dp->max_mididev); return get_mdev(dev); } diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c index cd0e0ebbfdb1..278ebb993122 100644 --- a/sound/core/seq/oss/seq_oss_synth.c +++ b/sound/core/seq/oss/seq_oss_synth.c @@ -26,6 +26,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> +#include <linux/nospec.h> /* * constants @@ -339,17 +340,13 @@ snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp) dp->max_synthdev = 0; } -/* - * check if the specified device is MIDI mapped device - */ -static int -is_midi_dev(struct seq_oss_devinfo *dp, int dev) +static struct seq_oss_synthinfo * +get_synthinfo_nospec(struct seq_oss_devinfo *dp, int dev) { if (dev < 0 || dev >= dp->max_synthdev) - return 0; - if (dp->synths[dev].is_midi) - return 1; - return 0; + return NULL; + dev = array_index_nospec(dev, SNDRV_SEQ_OSS_MAX_SYNTH_DEVS); + return &dp->synths[dev]; } /* @@ -359,14 +356,20 @@ static struct seq_oss_synth * get_synthdev(struct seq_oss_devinfo *dp, int dev) { struct seq_oss_synth *rec; - if (dev < 0 || dev >= dp->max_synthdev) - return NULL; - if (! dp->synths[dev].opened) + struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev); + + if (!info) return NULL; - if (dp->synths[dev].is_midi) - return &midi_synth_dev; - if ((rec = get_sdev(dev)) == NULL) + if (!info->opened) return NULL; + if (info->is_midi) { + rec = &midi_synth_dev; + snd_use_lock_use(&rec->use_lock); + } else { + rec = get_sdev(dev); + if (!rec) + return NULL; + } if (! rec->opened) { snd_use_lock_free(&rec->use_lock); return NULL; @@ -402,10 +405,8 @@ snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev) struct seq_oss_synth *rec; struct seq_oss_synthinfo *info; - if (snd_BUG_ON(dev < 0 || dev >= dp->max_synthdev)) - return; - info = &dp->synths[dev]; - if (! 
info->opened) + info = get_synthinfo_nospec(dp, dev); + if (!info || !info->opened) return; if (info->sysex) info->sysex->len = 0; /* reset sysex */ @@ -454,12 +455,14 @@ snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt, const char __user *buf, int p, int c) { struct seq_oss_synth *rec; + struct seq_oss_synthinfo *info; int rc; - if (dev < 0 || dev >= dp->max_synthdev) + info = get_synthinfo_nospec(dp, dev); + if (!info) return -ENXIO; - if (is_midi_dev(dp, dev)) + if (info->is_midi) return 0; if ((rec = get_synthdev(dp, dev)) == NULL) return -ENXIO; @@ -467,24 +470,25 @@ snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt, if (rec->oper.load_patch == NULL) rc = -ENXIO; else - rc = rec->oper.load_patch(&dp->synths[dev].arg, fmt, buf, p, c); + rc = rec->oper.load_patch(&info->arg, fmt, buf, p, c); snd_use_lock_free(&rec->use_lock); return rc; } /* - * check if the device is valid synth device + * check if the device is valid synth device and return the synth info */ -int -snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev) +struct seq_oss_synthinfo * +snd_seq_oss_synth_info(struct seq_oss_devinfo *dp, int dev) { struct seq_oss_synth *rec; + rec = get_synthdev(dp, dev); if (rec) { snd_use_lock_free(&rec->use_lock); - return 1; + return get_synthinfo_nospec(dp, dev); } - return 0; + return NULL; } @@ -499,16 +503,18 @@ snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf, int i, send; unsigned char *dest; struct seq_oss_synth_sysex *sysex; + struct seq_oss_synthinfo *info; - if (! snd_seq_oss_synth_is_valid(dp, dev)) + info = snd_seq_oss_synth_info(dp, dev); + if (!info) return -ENXIO; - sysex = dp->synths[dev].sysex; + sysex = info->sysex; if (sysex == NULL) { sysex = kzalloc(sizeof(*sysex), GFP_KERNEL); if (sysex == NULL) return -ENOMEM; - dp->synths[dev].sysex = sysex; + info->sysex = sysex; } send = 0; @@ -553,10 +559,12 @@ snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf, int snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev) { - if (! snd_seq_oss_synth_is_valid(dp, dev)) + struct seq_oss_synthinfo *info = snd_seq_oss_synth_info(dp, dev); + + if (!info) return -EINVAL; - snd_seq_oss_fill_addr(dp, ev, dp->synths[dev].arg.addr.client, - dp->synths[dev].arg.addr.port); + snd_seq_oss_fill_addr(dp, ev, info->arg.addr.client, + info->arg.addr.port); return 0; } @@ -568,16 +576,18 @@ int snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, unsigned long addr) { struct seq_oss_synth *rec; + struct seq_oss_synthinfo *info; int rc; - if (is_midi_dev(dp, dev)) + info = get_synthinfo_nospec(dp, dev); + if (!info || info->is_midi) return -ENXIO; if ((rec = get_synthdev(dp, dev)) == NULL) return -ENXIO; if (rec->oper.ioctl == NULL) rc = -ENXIO; else - rc = rec->oper.ioctl(&dp->synths[dev].arg, cmd, addr); + rc = rec->oper.ioctl(&info->arg, cmd, addr); snd_use_lock_free(&rec->use_lock); return rc; } @@ -589,7 +599,10 @@ snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, u int snd_seq_oss_synth_raw_event(struct seq_oss_devinfo *dp, int dev, unsigned char *data, struct snd_seq_event *ev) { - if (! 
snd_seq_oss_synth_is_valid(dp, dev) || is_midi_dev(dp, dev)) + struct seq_oss_synthinfo *info; + + info = snd_seq_oss_synth_info(dp, dev); + if (!info || info->is_midi) return -ENXIO; ev->type = SNDRV_SEQ_EVENT_OSS; memcpy(ev->data.raw8.d, data, 8); diff --git a/sound/core/seq/oss/seq_oss_synth.h b/sound/core/seq/oss/seq_oss_synth.h index 74ac55f166b6..a63f9e22974d 100644 --- a/sound/core/seq/oss/seq_oss_synth.h +++ b/sound/core/seq/oss/seq_oss_synth.h @@ -37,7 +37,8 @@ void snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp); void snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev); int snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt, const char __user *buf, int p, int c); -int snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev); +struct seq_oss_synthinfo *snd_seq_oss_synth_info(struct seq_oss_devinfo *dp, + int dev); int snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf, struct snd_seq_event *ev); int snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev); diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c index f48a4cd24ffc..289ae6bb81d9 100644 --- a/sound/core/seq/seq_virmidi.c +++ b/sound/core/seq/seq_virmidi.c @@ -174,12 +174,12 @@ static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream, } return; } + spin_lock_irqsave(&substream->runtime->lock, flags); if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) { if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0) - return; + goto out; vmidi->event.type = SNDRV_SEQ_EVENT_NONE; } - spin_lock_irqsave(&substream->runtime->lock, flags); while (1) { count = __snd_rawmidi_transmit_peek(substream, buf, sizeof(buf)); if (count <= 0) diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c index 58e349fc893f..eab7f594ebe7 100644 --- a/sound/drivers/aloop.c +++ b/sound/drivers/aloop.c @@ -831,9 +831,11 @@ static int loopback_rate_shift_get(struct snd_kcontrol *kcontrol, { struct loopback *loopback = snd_kcontrol_chip(kcontrol); + mutex_lock(&loopback->cable_lock); ucontrol->value.integer.value[0] = loopback->setup[kcontrol->id.subdevice] [kcontrol->id.device].rate_shift; + mutex_unlock(&loopback->cable_lock); return 0; } @@ -865,9 +867,11 @@ static int loopback_notify_get(struct snd_kcontrol *kcontrol, { struct loopback *loopback = snd_kcontrol_chip(kcontrol); + mutex_lock(&loopback->cable_lock); ucontrol->value.integer.value[0] = loopback->setup[kcontrol->id.subdevice] [kcontrol->id.device].notify; + mutex_unlock(&loopback->cable_lock); return 0; } @@ -879,12 +883,14 @@ static int loopback_notify_put(struct snd_kcontrol *kcontrol, int change = 0; val = ucontrol->value.integer.value[0] ? 
1 : 0; + mutex_lock(&loopback->cable_lock); if (val != loopback->setup[kcontrol->id.subdevice] [kcontrol->id.device].notify) { loopback->setup[kcontrol->id.subdevice] [kcontrol->id.device].notify = val; change = 1; } + mutex_unlock(&loopback->cable_lock); return change; } @@ -892,15 +898,18 @@ static int loopback_active_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct loopback *loopback = snd_kcontrol_chip(kcontrol); - struct loopback_cable *cable = loopback->cables - [kcontrol->id.subdevice][kcontrol->id.device ^ 1]; + struct loopback_cable *cable; + unsigned int val = 0; + mutex_lock(&loopback->cable_lock); + cable = loopback->cables[kcontrol->id.subdevice][kcontrol->id.device ^ 1]; if (cable != NULL) { unsigned int running = cable->running ^ cable->pause; val = (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) ? 1 : 0; } + mutex_unlock(&loopback->cable_lock); ucontrol->value.integer.value[0] = val; return 0; } @@ -943,9 +952,11 @@ static int loopback_rate_get(struct snd_kcontrol *kcontrol, { struct loopback *loopback = snd_kcontrol_chip(kcontrol); + mutex_lock(&loopback->cable_lock); ucontrol->value.integer.value[0] = loopback->setup[kcontrol->id.subdevice] [kcontrol->id.device].rate; + mutex_unlock(&loopback->cable_lock); return 0; } @@ -965,9 +976,11 @@ static int loopback_channels_get(struct snd_kcontrol *kcontrol, { struct loopback *loopback = snd_kcontrol_chip(kcontrol); + mutex_lock(&loopback->cable_lock); ucontrol->value.integer.value[0] = loopback->setup[kcontrol->id.subdevice] [kcontrol->id.device].channels; + mutex_unlock(&loopback->cable_lock); return 0; } diff --git a/sound/drivers/opl3/opl3_synth.c b/sound/drivers/opl3/opl3_synth.c index ddcc1a325a61..42920a243328 100644 --- a/sound/drivers/opl3/opl3_synth.c +++ b/sound/drivers/opl3/opl3_synth.c @@ -21,6 +21,7 @@ #include <linux/slab.h> #include <linux/export.h> +#include <linux/nospec.h> #include <sound/opl3.h> #include <sound/asound_fm.h> @@ -448,7 +449,7 @@ static int snd_opl3_set_voice(struct snd_opl3 * opl3, struct snd_dm_fm_voice * v { unsigned short reg_side; unsigned char op_offset; - unsigned char voice_offset; + unsigned char voice_offset, voice_op; unsigned short opl3_reg; unsigned char reg_val; @@ -473,7 +474,9 @@ static int snd_opl3_set_voice(struct snd_opl3 * opl3, struct snd_dm_fm_voice * v voice_offset = voice->voice - MAX_OPL2_VOICES; } /* Get register offset of operator */ - op_offset = snd_opl3_regmap[voice_offset][voice->op]; + voice_offset = array_index_nospec(voice_offset, MAX_OPL2_VOICES); + voice_op = array_index_nospec(voice->op, 4); + op_offset = snd_opl3_regmap[voice_offset][voice_op]; reg_val = 0x00; /* Set amplitude modulation (tremolo) effect */ diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c index 4a1dc145327b..cb9acfe60f6a 100644 --- a/sound/firewire/amdtp-stream.c +++ b/sound/firewire/amdtp-stream.c @@ -773,8 +773,6 @@ static void amdtp_stream_first_callback(struct fw_iso_context *context, u32 cycle; unsigned int packets; - s->max_payload_length = amdtp_stream_get_max_payload(s); - /* * For in-stream, first packet has come. 
* For out-stream, prepared to transmit first packet @@ -879,6 +877,9 @@ int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed) amdtp_stream_update(s); + if (s->direction == AMDTP_IN_STREAM) + s->max_payload_length = amdtp_stream_get_max_payload(s); + if (s->flags & CIP_NO_HEADER) s->tag = TAG_NO_CIP_HEADER; else diff --git a/sound/firewire/dice/dice-stream.c b/sound/firewire/dice/dice-stream.c index 8573289c381e..928a255bfc35 100644 --- a/sound/firewire/dice/dice-stream.c +++ b/sound/firewire/dice/dice-stream.c @@ -435,7 +435,7 @@ int snd_dice_stream_init_duplex(struct snd_dice *dice) err = init_stream(dice, AMDTP_IN_STREAM, i); if (err < 0) { for (; i >= 0; i--) - destroy_stream(dice, AMDTP_OUT_STREAM, i); + destroy_stream(dice, AMDTP_IN_STREAM, i); goto end; } } diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c index 4ddb4cdd054b..96bb01b6b751 100644 --- a/sound/firewire/dice/dice.c +++ b/sound/firewire/dice/dice.c @@ -14,7 +14,7 @@ MODULE_LICENSE("GPL v2"); #define OUI_WEISS 0x001c6a #define OUI_LOUD 0x000ff2 #define OUI_FOCUSRITE 0x00130e -#define OUI_TCELECTRONIC 0x001486 +#define OUI_TCELECTRONIC 0x000166 #define DICE_CATEGORY_ID 0x04 #define WEISS_CATEGORY_ID 0x00 diff --git a/sound/pci/asihpi/hpimsginit.c b/sound/pci/asihpi/hpimsginit.c index 7eb617175fde..a31a70dccecf 100644 --- a/sound/pci/asihpi/hpimsginit.c +++ b/sound/pci/asihpi/hpimsginit.c @@ -23,6 +23,7 @@ #include "hpi_internal.h" #include "hpimsginit.h" +#include <linux/nospec.h> /* The actual message size for each object type */ static u16 msg_size[HPI_OBJ_MAXINDEX + 1] = HPI_MESSAGE_SIZE_BY_OBJECT; @@ -39,10 +40,12 @@ static void hpi_init_message(struct hpi_message *phm, u16 object, { u16 size; - if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) + if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) { + object = array_index_nospec(object, HPI_OBJ_MAXINDEX + 1); size = msg_size[object]; - else + } else { size = sizeof(*phm); + } memset(phm, 0, size); phm->size = size; @@ -66,10 +69,12 @@ void hpi_init_response(struct hpi_response *phr, u16 object, u16 function, { u16 size; - if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) + if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) { + object = array_index_nospec(object, HPI_OBJ_MAXINDEX + 1); size = res_size[object]; - else + } else { size = sizeof(*phr); + } memset(phr, 0, sizeof(*phr)); phr->size = size; diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c index 5badd08e1d69..b1a2a7ea4172 100644 --- a/sound/pci/asihpi/hpioctl.c +++ b/sound/pci/asihpi/hpioctl.c @@ -33,6 +33,7 @@ #include <linux/stringify.h> #include <linux/module.h> #include <linux/vmalloc.h> +#include <linux/nospec.h> #ifdef MODULE_FIRMWARE MODULE_FIRMWARE("asihpi/dsp5000.bin"); @@ -186,7 +187,8 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg) struct hpi_adapter *pa = NULL; if (hm->h.adapter_index < ARRAY_SIZE(adapters)) - pa = &adapters[hm->h.adapter_index]; + pa = &adapters[array_index_nospec(hm->h.adapter_index, + ARRAY_SIZE(adapters))]; if (!pa || !pa->adapter || !pa->adapter->type) { hpi_init_response(&hr->r0, hm->h.object, diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c index 57df06e76968..cc009a4a3d1d 100644 --- a/sound/pci/hda/hda_hwdep.c +++ b/sound/pci/hda/hda_hwdep.c @@ -21,6 +21,7 @@ #include <linux/init.h> #include <linux/slab.h> #include <linux/compat.h> +#include <linux/nospec.h> #include <sound/core.h> #include "hda_codec.h" #include "hda_local.h" @@ -51,7 +52,16 @@ static int 
get_wcap_ioctl(struct hda_codec *codec, if (get_user(verb, &arg->verb)) return -EFAULT; - res = get_wcaps(codec, verb >> 24); + /* open-code get_wcaps(verb>>24) with nospec */ + verb >>= 24; + if (verb < codec->core.start_nid || + verb >= codec->core.start_nid + codec->core.num_nodes) { + res = 0; + } else { + verb -= codec->core.start_nid; + verb = array_index_nospec(verb, codec->core.num_nodes); + res = codec->wcaps[verb]; + } if (put_user(res, &arg->res)) return -EFAULT; return 0; diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 7a111a1b5836..b0c8c79848a9 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -1647,7 +1647,8 @@ static void azx_check_snoop_available(struct azx *chip) */ u8 val; pci_read_config_byte(chip->pci, 0x42, &val); - if (!(val & 0x80) && chip->pci->revision == 0x30) + if (!(val & 0x80) && (chip->pci->revision == 0x30 || + chip->pci->revision == 0x20)) snoop = false; } diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index b4f1b6e88305..7d7eb1354eee 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -1383,6 +1383,8 @@ static void hdmi_pcm_setup_pin(struct hdmi_spec *spec, pcm = get_pcm_rec(spec, per_pin->pcm_idx); else return; + if (!pcm->pcm) + return; if (!test_bit(per_pin->pcm_idx, &spec->pcm_in_use)) return; @@ -2151,8 +2153,13 @@ static int generic_hdmi_build_controls(struct hda_codec *codec) int dev, err; int pin_idx, pcm_idx; - for (pcm_idx = 0; pcm_idx < spec->pcm_used; pcm_idx++) { + if (!get_pcm_rec(spec, pcm_idx)->pcm) { + /* no PCM: mark this for skipping permanently */ + set_bit(pcm_idx, &spec->pcm_bitmap); + continue; + } + err = generic_hdmi_build_jack(codec, pcm_idx); if (err < 0) return err; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index aef1f52db7d9..2dd34dd77447 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -331,6 +331,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) /* fallthrough */ case 0x10ec0215: case 0x10ec0233: + case 0x10ec0235: case 0x10ec0236: case 0x10ec0255: case 0x10ec0256: @@ -3831,7 +3832,7 @@ static void alc280_fixup_hp_gpio4(struct hda_codec *codec, } } -#if IS_REACHABLE(INPUT) +#if IS_REACHABLE(CONFIG_INPUT) static void gpio2_mic_hotkey_event(struct hda_codec *codec, struct hda_jack_callback *event) { @@ -6370,6 +6371,8 @@ static const struct hda_fixup alc269_fixups[] = { { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ { } }, + .chained = true, + .chain_id = ALC269_FIXUP_HEADSET_MIC }, }; @@ -6573,6 +6576,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), + SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), + SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), @@ -7157,8 +7162,11 @@ static int patch_alc269(struct hda_codec *codec) case 0x10ec0298: spec->codec_variant = ALC269_TYPE_ALC298; break; + case 0x10ec0235: case 0x10ec0255: 
spec->codec_variant = ALC269_TYPE_ALC255; + spec->shutup = alc256_shutup; + spec->init_hook = alc256_init; break; case 0x10ec0236: case 0x10ec0256: diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c index 4c59983158e0..11b5b5e0e058 100644 --- a/sound/pci/rme9652/hdspm.c +++ b/sound/pci/rme9652/hdspm.c @@ -137,6 +137,7 @@ #include <linux/pci.h> #include <linux/math64.h> #include <linux/io.h> +#include <linux/nospec.h> #include <sound/core.h> #include <sound/control.h> @@ -5698,40 +5699,43 @@ static int snd_hdspm_channel_info(struct snd_pcm_substream *substream, struct snd_pcm_channel_info *info) { struct hdspm *hdspm = snd_pcm_substream_chip(substream); + unsigned int channel = info->channel; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { - if (snd_BUG_ON(info->channel >= hdspm->max_channels_out)) { + if (snd_BUG_ON(channel >= hdspm->max_channels_out)) { dev_info(hdspm->card->dev, "snd_hdspm_channel_info: output channel out of range (%d)\n", - info->channel); + channel); return -EINVAL; } - if (hdspm->channel_map_out[info->channel] < 0) { + channel = array_index_nospec(channel, hdspm->max_channels_out); + if (hdspm->channel_map_out[channel] < 0) { dev_info(hdspm->card->dev, "snd_hdspm_channel_info: output channel %d mapped out\n", - info->channel); + channel); return -EINVAL; } - info->offset = hdspm->channel_map_out[info->channel] * + info->offset = hdspm->channel_map_out[channel] * HDSPM_CHANNEL_BUFFER_BYTES; } else { - if (snd_BUG_ON(info->channel >= hdspm->max_channels_in)) { + if (snd_BUG_ON(channel >= hdspm->max_channels_in)) { dev_info(hdspm->card->dev, "snd_hdspm_channel_info: input channel out of range (%d)\n", - info->channel); + channel); return -EINVAL; } - if (hdspm->channel_map_in[info->channel] < 0) { + channel = array_index_nospec(channel, hdspm->max_channels_in); + if (hdspm->channel_map_in[channel] < 0) { dev_info(hdspm->card->dev, "snd_hdspm_channel_info: input channel %d mapped out\n", - info->channel); + channel); return -EINVAL; } - info->offset = hdspm->channel_map_in[info->channel] * + info->offset = hdspm->channel_map_in[channel] * HDSPM_CHANNEL_BUFFER_BYTES; } diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c index df648b1d9217..edd765e22377 100644 --- a/sound/pci/rme9652/rme9652.c +++ b/sound/pci/rme9652/rme9652.c @@ -26,6 +26,7 @@ #include <linux/pci.h> #include <linux/module.h> #include <linux/io.h> +#include <linux/nospec.h> #include <sound/core.h> #include <sound/control.h> @@ -2071,9 +2072,10 @@ static int snd_rme9652_channel_info(struct snd_pcm_substream *substream, if (snd_BUG_ON(info->channel >= RME9652_NCHANNELS)) return -EINVAL; - if ((chn = rme9652->channel_map[info->channel]) < 0) { + chn = rme9652->channel_map[array_index_nospec(info->channel, + RME9652_NCHANNELS)]; + if (chn < 0) return -EINVAL; - } info->offset = chn * RME9652_CHANNEL_BUFFER_BYTES; info->first = 0; diff --git a/sound/soc/amd/acp-da7219-max98357a.c b/sound/soc/amd/acp-da7219-max98357a.c index b205c782e494..f41560ecbcd1 100644 --- a/sound/soc/amd/acp-da7219-max98357a.c +++ b/sound/soc/amd/acp-da7219-max98357a.c @@ -43,7 +43,7 @@ #define DUAL_CHANNEL 2 static struct snd_soc_jack cz_jack; -struct clk *da7219_dai_clk; +static struct clk *da7219_dai_clk; static int cz_da7219_init(struct snd_soc_pcm_runtime *rtd) { diff --git a/sound/soc/codecs/adau17x1.c b/sound/soc/codecs/adau17x1.c index 80c2a06285bb..12bf24c26818 100644 --- a/sound/soc/codecs/adau17x1.c +++ b/sound/soc/codecs/adau17x1.c @@ -502,7 +502,7 @@ static int adau17x1_hw_params(struct 
snd_pcm_substream *substream, } if (adau->sigmadsp) { - ret = adau17x1_setup_firmware(adau, params_rate(params)); + ret = adau17x1_setup_firmware(component, params_rate(params)); if (ret < 0) return ret; } @@ -835,26 +835,40 @@ bool adau17x1_volatile_register(struct device *dev, unsigned int reg) } EXPORT_SYMBOL_GPL(adau17x1_volatile_register); -int adau17x1_setup_firmware(struct adau *adau, unsigned int rate) +int adau17x1_setup_firmware(struct snd_soc_component *component, + unsigned int rate) { int ret; - int dspsr; + int dspsr, dsp_run; + struct adau *adau = snd_soc_component_get_drvdata(component); + struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component); + + snd_soc_dapm_mutex_lock(dapm); ret = regmap_read(adau->regmap, ADAU17X1_DSP_SAMPLING_RATE, &dspsr); if (ret) - return ret; + goto err; + + ret = regmap_read(adau->regmap, ADAU17X1_DSP_RUN, &dsp_run); + if (ret) + goto err; regmap_write(adau->regmap, ADAU17X1_DSP_ENABLE, 1); regmap_write(adau->regmap, ADAU17X1_DSP_SAMPLING_RATE, 0xf); + regmap_write(adau->regmap, ADAU17X1_DSP_RUN, 0); ret = sigmadsp_setup(adau->sigmadsp, rate); if (ret) { regmap_write(adau->regmap, ADAU17X1_DSP_ENABLE, 0); - return ret; + goto err; } regmap_write(adau->regmap, ADAU17X1_DSP_SAMPLING_RATE, dspsr); + regmap_write(adau->regmap, ADAU17X1_DSP_RUN, dsp_run); - return 0; +err: + snd_soc_dapm_mutex_unlock(dapm); + + return ret; } EXPORT_SYMBOL_GPL(adau17x1_setup_firmware); diff --git a/sound/soc/codecs/adau17x1.h b/sound/soc/codecs/adau17x1.h index a7b1cb770814..e6fe87beec07 100644 --- a/sound/soc/codecs/adau17x1.h +++ b/sound/soc/codecs/adau17x1.h @@ -68,7 +68,8 @@ int adau17x1_resume(struct snd_soc_component *component); extern const struct snd_soc_dai_ops adau17x1_dai_ops; -int adau17x1_setup_firmware(struct adau *adau, unsigned int rate); +int adau17x1_setup_firmware(struct snd_soc_component *component, + unsigned int rate); bool adau17x1_has_dsp(struct adau *adau); #define ADAU17X1_CLOCK_CONTROL 0x4000 diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c index 12ee83d52405..b7cf7cce95fe 100644 --- a/sound/soc/codecs/msm8916-wcd-analog.c +++ b/sound/soc/codecs/msm8916-wcd-analog.c @@ -1187,7 +1187,8 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev) return irq; } - ret = devm_request_irq(dev, irq, pm8916_mbhc_switch_irq_handler, + ret = devm_request_threaded_irq(dev, irq, NULL, + pm8916_mbhc_switch_irq_handler, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "mbhc switch irq", priv); @@ -1201,7 +1202,8 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev) return irq; } - ret = devm_request_irq(dev, irq, mbhc_btn_press_irq_handler, + ret = devm_request_threaded_irq(dev, irq, NULL, + mbhc_btn_press_irq_handler, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "mbhc btn press irq", priv); @@ -1214,7 +1216,8 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev) return irq; } - ret = devm_request_irq(dev, irq, mbhc_btn_release_irq_handler, + ret = devm_request_threaded_irq(dev, irq, NULL, + mbhc_btn_release_irq_handler, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "mbhc btn release irq", priv); diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c index e8a66b03faab..1570b91bf018 100644 --- a/sound/soc/codecs/rt5514.c +++ b/sound/soc/codecs/rt5514.c @@ -89,6 +89,7 @@ static const struct reg_default rt5514_reg[] = { {RT5514_PLL3_CALIB_CTRL5, 0x40220012}, {RT5514_DELAY_BUF_CTRL1, 0x7fff006a}, 
{RT5514_DELAY_BUF_CTRL3, 0x00000000}, + {RT5514_ASRC_IN_CTRL1, 0x00000003}, {RT5514_DOWNFILTER0_CTRL1, 0x00020c2f}, {RT5514_DOWNFILTER0_CTRL2, 0x00020c2f}, {RT5514_DOWNFILTER0_CTRL3, 0x10000362}, @@ -181,6 +182,7 @@ static bool rt5514_readable_register(struct device *dev, unsigned int reg) case RT5514_PLL3_CALIB_CTRL5: case RT5514_DELAY_BUF_CTRL1: case RT5514_DELAY_BUF_CTRL3: + case RT5514_ASRC_IN_CTRL1: case RT5514_DOWNFILTER0_CTRL1: case RT5514_DOWNFILTER0_CTRL2: case RT5514_DOWNFILTER0_CTRL3: @@ -238,6 +240,7 @@ static bool rt5514_i2c_readable_register(struct device *dev, case RT5514_DSP_MAPPING | RT5514_PLL3_CALIB_CTRL5: case RT5514_DSP_MAPPING | RT5514_DELAY_BUF_CTRL1: case RT5514_DSP_MAPPING | RT5514_DELAY_BUF_CTRL3: + case RT5514_DSP_MAPPING | RT5514_ASRC_IN_CTRL1: case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL1: case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL2: case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL3: diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c index 40a700493f4c..da8fd98c7f51 100644 --- a/sound/soc/fsl/fsl_esai.c +++ b/sound/soc/fsl/fsl_esai.c @@ -144,6 +144,13 @@ static int fsl_esai_divisor_cal(struct snd_soc_dai *dai, bool tx, u32 ratio, psr = ratio <= 256 * maxfp ? ESAI_xCCR_xPSR_BYPASS : ESAI_xCCR_xPSR_DIV8; + /* Do not loop-search if PM (1 ~ 256) alone can serve the ratio */ + if (ratio <= 256) { + pm = ratio; + fp = 1; + goto out; + } + /* Set the max fluctuation -- 0.1% of the max devisor */ savesub = (psr ? 1 : 8) * 256 * maxfp / 1000; diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index 0823b08923b5..89df2d9f63d7 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c @@ -217,6 +217,7 @@ struct fsl_ssi_soc_data { * @dai_fmt: DAI configuration this device is currently used with * @streams: Mask of current active streams: BIT(TX) and BIT(RX) * @i2s_net: I2S and Network mode configurations of SCR register + * (this is the initial settings based on the DAI format) * @synchronous: Use synchronous mode - both of TX and RX use STCK and SFCK * @use_dma: DMA is used or FIQ with stream filter * @use_dual_fifo: DMA with support for dual FIFO mode @@ -829,16 +830,23 @@ static int fsl_ssi_hw_params(struct snd_pcm_substream *substream, } if (!fsl_ssi_is_ac97(ssi)) { + /* + * Keep the ssi->i2s_net intact while having a local variable + * to override settings for special use cases. Otherwise, the + * ssi->i2s_net will lose the settings for regular use cases. + */ + u8 i2s_net = ssi->i2s_net; + /* Normal + Network mode to send 16-bit data in 32-bit frames */ if (fsl_ssi_is_i2s_cbm_cfs(ssi) && sample_size == 16) - ssi->i2s_net = SSI_SCR_I2S_MODE_NORMAL | SSI_SCR_NET; + i2s_net = SSI_SCR_I2S_MODE_NORMAL | SSI_SCR_NET; /* Use Normal mode to send mono data at 1st slot of 2 slots */ if (channels == 1) - ssi->i2s_net = SSI_SCR_I2S_MODE_NORMAL; + i2s_net = SSI_SCR_I2S_MODE_NORMAL; regmap_update_bits(regs, REG_SSI_SCR, - SSI_SCR_I2S_NET_MASK, ssi->i2s_net); + SSI_SCR_I2S_NET_MASK, i2s_net); } /* In synchronous mode, the SSI uses STCCR for capture */ diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig index ceb105cbd461..addac2a8e52a 100644 --- a/sound/soc/intel/Kconfig +++ b/sound/soc/intel/Kconfig @@ -72,24 +72,28 @@ config SND_SOC_INTEL_BAYTRAIL for Baytrail Chromebooks but this option is now deprecated and is not recommended, use SND_SST_ATOM_HIFI2_PLATFORM instead. 
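The fsl_ssi hw_params hunk above stops overwriting the cached ssi->i2s_net and instead applies the 16-bit-in-32-bit-frame and mono special cases to a local copy before programming SCR, so the DAI-format baseline survives across streams. A minimal, self-contained sketch of that pattern, using hypothetical names (ssi_state, write_scr_mode) rather than the driver's regmap calls:

#include <stdint.h>
#include <stdio.h>

/* Cached baseline derived once from the DAI format (stands in for ssi->i2s_net). */
struct ssi_state {
	uint8_t i2s_net;
};

/* Hypothetical register write; the driver uses regmap_update_bits() on REG_SSI_SCR. */
static void write_scr_mode(uint8_t mode)
{
	printf("SCR I2S/NET bits = 0x%02x\n", mode);
}

/* Per-stream setup: the special cases touch only the local copy, the cache survives. */
static void hw_params(struct ssi_state *ssi, int channels)
{
	uint8_t i2s_net = ssi->i2s_net;	/* local override, baseline preserved */

	if (channels == 1)
		i2s_net = 0;		/* e.g. plain Normal mode for mono */

	write_scr_mode(i2s_net);	/* ssi->i2s_net itself is left untouched */
}

int main(void)
{
	struct ssi_state ssi = { .i2s_net = 0x03 };

	hw_params(&ssi, 1);	/* mono stream uses the override */
	hw_params(&ssi, 2);	/* stereo stream sees the original baseline again */
	return 0;
}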
+config SND_SST_ATOM_HIFI2_PLATFORM + tristate + select SND_SOC_COMPRESS + config SND_SST_ATOM_HIFI2_PLATFORM_PCI - tristate "PCI HiFi2 (Medfield, Merrifield) Platforms" + tristate "PCI HiFi2 (Merrifield) Platforms" depends on X86 && PCI select SND_SST_IPC_PCI - select SND_SOC_COMPRESS + select SND_SST_ATOM_HIFI2_PLATFORM help - If you have a Intel Medfield or Merrifield/Edison platform, then + If you have a Intel Merrifield/Edison platform, then enable this option by saying Y or m. Distros will typically not - enable this option: Medfield devices are not available to - developers and while Merrifield/Edison can run a mainline kernel with - limited functionality it will require a firmware file which - is not in the standard firmware tree + enable this option: while Merrifield/Edison can run a mainline + kernel with limited functionality it will require a firmware file + which is not in the standard firmware tree -config SND_SST_ATOM_HIFI2_PLATFORM +config SND_SST_ATOM_HIFI2_PLATFORM_ACPI tristate "ACPI HiFi2 (Baytrail, Cherrytrail) Platforms" + default ACPI depends on X86 && ACPI select SND_SST_IPC_ACPI - select SND_SOC_COMPRESS + select SND_SST_ATOM_HIFI2_PLATFORM select SND_SOC_ACPI_INTEL_MATCH select IOSF_MBI help diff --git a/sound/soc/omap/omap-dmic.c b/sound/soc/omap/omap-dmic.c index 09db2aec12a3..b2f5d2fa354d 100644 --- a/sound/soc/omap/omap-dmic.c +++ b/sound/soc/omap/omap-dmic.c @@ -281,7 +281,7 @@ static int omap_dmic_dai_trigger(struct snd_pcm_substream *substream, static int omap_dmic_select_fclk(struct omap_dmic *dmic, int clk_id, unsigned int freq) { - struct clk *parent_clk; + struct clk *parent_clk, *mux; char *parent_clk_name; int ret = 0; @@ -329,14 +329,21 @@ static int omap_dmic_select_fclk(struct omap_dmic *dmic, int clk_id, return -ENODEV; } + mux = clk_get_parent(dmic->fclk); + if (IS_ERR(mux)) { + dev_err(dmic->dev, "can't get fck mux parent\n"); + clk_put(parent_clk); + return -ENODEV; + } + mutex_lock(&dmic->mutex); if (dmic->active) { /* disable clock while reparenting */ pm_runtime_put_sync(dmic->dev); - ret = clk_set_parent(dmic->fclk, parent_clk); + ret = clk_set_parent(mux, parent_clk); pm_runtime_get_sync(dmic->dev); } else { - ret = clk_set_parent(dmic->fclk, parent_clk); + ret = clk_set_parent(mux, parent_clk); } mutex_unlock(&dmic->mutex); @@ -349,6 +356,7 @@ static int omap_dmic_select_fclk(struct omap_dmic *dmic, int clk_id, dmic->fclk_freq = freq; err_busy: + clk_put(mux); clk_put(parent_clk); return ret; diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c index 6a76688a8ba9..94f081b93258 100644 --- a/sound/soc/sh/rcar/core.c +++ b/sound/soc/sh/rcar/core.c @@ -1536,7 +1536,7 @@ static int rsnd_remove(struct platform_device *pdev) return ret; } -static int rsnd_suspend(struct device *dev) +static int __maybe_unused rsnd_suspend(struct device *dev) { struct rsnd_priv *priv = dev_get_drvdata(dev); @@ -1545,7 +1545,7 @@ static int rsnd_suspend(struct device *dev) return 0; } -static int rsnd_resume(struct device *dev) +static int __maybe_unused rsnd_resume(struct device *dev) { struct rsnd_priv *priv = dev_get_drvdata(dev); diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index fa27d0fca6dc..986b8b2f90fb 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c @@ -513,7 +513,7 @@ static void remove_widget(struct snd_soc_component *comp, */ if (dobj->widget.kcontrol_type == SND_SOC_TPLG_TYPE_ENUM) { /* enumerated widget mixer */ - for (i = 0; i < w->num_kcontrols; i++) { + for (i = 0; w->kcontrols != NULL && i < 
w->num_kcontrols; i++) { struct snd_kcontrol *kcontrol = w->kcontrols[i]; struct soc_enum *se = (struct soc_enum *)kcontrol->private_value; @@ -530,7 +530,7 @@ static void remove_widget(struct snd_soc_component *comp, } } else { /* volume mixer or bytes controls */ - for (i = 0; i < w->num_kcontrols; i++) { + for (i = 0; w->kcontrols != NULL && i < w->num_kcontrols; i++) { struct snd_kcontrol *kcontrol = w->kcontrols[i]; if (dobj->widget.kcontrol_type @@ -1325,8 +1325,10 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_denum_create( ec->hdr.name); kc[i].name = kstrdup(ec->hdr.name, GFP_KERNEL); - if (kc[i].name == NULL) + if (kc[i].name == NULL) { + kfree(se); goto err_se; + } kc[i].private_value = (long)se; kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER; kc[i].access = ec->hdr.access; @@ -1442,8 +1444,10 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dbytes_create( be->hdr.name, be->hdr.access); kc[i].name = kstrdup(be->hdr.name, GFP_KERNEL); - if (kc[i].name == NULL) + if (kc[i].name == NULL) { + kfree(sbe); goto err; + } kc[i].private_value = (long)sbe; kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER; kc[i].access = be->hdr.access; @@ -2576,7 +2580,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index) /* match index */ if (dobj->index != index && - dobj->index != SND_SOC_TPLG_INDEX_ALL) + index != SND_SOC_TPLG_INDEX_ALL) continue; switch (dobj->type) { diff --git a/sound/usb/line6/midi.c b/sound/usb/line6/midi.c index 6d7cde56a355..e2cf55c53ea8 100644 --- a/sound/usb/line6/midi.c +++ b/sound/usb/line6/midi.c @@ -125,7 +125,7 @@ static int send_midi_async(struct usb_line6 *line6, unsigned char *data, } usb_fill_int_urb(urb, line6->usbdev, - usb_sndbulkpipe(line6->usbdev, + usb_sndintpipe(line6->usbdev, line6->properties->ep_ctrl_w), transfer_buffer, length, midi_sent, line6, line6->interval); diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 301ad61ed426..344d7b069d59 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -1776,7 +1776,8 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, build_feature_ctl(state, _ftr, ch_bits, control, &iterm, unitid, ch_read_only); if (uac_v2v3_control_is_readable(master_bits, control)) - build_feature_ctl(state, _ftr, 0, i, &iterm, unitid, + build_feature_ctl(state, _ftr, 0, control, + &iterm, unitid, !uac_v2v3_control_is_writeable(master_bits, control)); } @@ -1859,7 +1860,7 @@ static int parse_audio_input_terminal(struct mixer_build *state, int unitid, check_input_term(state, d->bTerminalID, &iterm); if (state->mixer->protocol == UAC_VERSION_2) { /* Check for jack detection. 
*/ - if (uac_v2v3_control_is_readable(d->bmControls, + if (uac_v2v3_control_is_readable(le16_to_cpu(d->bmControls), UAC2_TE_CONNECTOR)) { build_connector_control(state, &iterm, true); } @@ -2561,7 +2562,7 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer) if (err < 0 && err != -EINVAL) return err; - if (uac_v2v3_control_is_readable(desc->bmControls, + if (uac_v2v3_control_is_readable(le16_to_cpu(desc->bmControls), UAC2_TE_CONNECTOR)) { build_connector_control(&state, &state.oterm, false); diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c index 9038b2e7df73..eaa03acd4686 100644 --- a/sound/usb/mixer_maps.c +++ b/sound/usb/mixer_maps.c @@ -353,8 +353,11 @@ static struct usbmix_name_map bose_companion5_map[] = { /* * Dell usb dock with ALC4020 codec had a firmware problem where it got * screwed up when zero volume is passed; just skip it as a workaround + * + * Also the extension unit gives an access error, so skip it as well. */ static const struct usbmix_name_map dell_alc4020_map[] = { + { 4, NULL }, /* extension unit */ { 16, NULL }, { 19, NULL }, { 0 } diff --git a/sound/usb/stream.c b/sound/usb/stream.c index 6a8f5843334e..956be9f7c72a 100644 --- a/sound/usb/stream.c +++ b/sound/usb/stream.c @@ -349,7 +349,7 @@ snd_pcm_chmap_elem *convert_chmap_v3(struct uac3_cluster_header_descriptor * TODO: this conversion is not complete, update it * after adding UAC3 values to asound.h */ - switch (is->bChPurpose) { + switch (is->bChRelationship) { case UAC3_CH_MONO: map = SNDRV_CHMAP_MONO; break; diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c index ebcab5c5465d..8082f7b077f1 100644 --- a/sound/usb/usx2y/us122l.c +++ b/sound/usb/usx2y/us122l.c @@ -139,7 +139,7 @@ static void usb_stream_hwdep_vm_open(struct vm_area_struct *area) snd_printdd(KERN_DEBUG "%i\n", atomic_read(&us122l->mmap_count)); } -static int usb_stream_hwdep_vm_fault(struct vm_fault *vmf) +static vm_fault_t usb_stream_hwdep_vm_fault(struct vm_fault *vmf) { unsigned long offset; struct page *page; diff --git a/sound/usb/usx2y/usX2Yhwdep.c b/sound/usb/usx2y/usX2Yhwdep.c index d8bd7c99b48c..c1dd9a7b48df 100644 --- a/sound/usb/usx2y/usX2Yhwdep.c +++ b/sound/usb/usx2y/usX2Yhwdep.c @@ -31,7 +31,7 @@ #include "usbusx2y.h" #include "usX2Yhwdep.h" -static int snd_us428ctls_vm_fault(struct vm_fault *vmf) +static vm_fault_t snd_us428ctls_vm_fault(struct vm_fault *vmf) { unsigned long offset; struct page * page; diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c index 0d050528a4e1..4fd9276b8e50 100644 --- a/sound/usb/usx2y/usx2yhwdeppcm.c +++ b/sound/usb/usx2y/usx2yhwdeppcm.c @@ -652,7 +652,7 @@ static void snd_usX2Y_hwdep_pcm_vm_close(struct vm_area_struct *area) } -static int snd_usX2Y_hwdep_pcm_vm_fault(struct vm_fault *vmf) +static vm_fault_t snd_usX2Y_hwdep_pcm_vm_fault(struct vm_fault *vmf) { unsigned long offset; void *vaddr; diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h index 6edd177bb1c7..caae4843cb70 100644 --- a/tools/arch/arm/include/uapi/asm/kvm.h +++ b/tools/arch/arm/include/uapi/asm/kvm.h @@ -135,6 +135,15 @@ struct kvm_arch_memory_slot { #define KVM_REG_ARM_CRM_SHIFT 7 #define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800 #define KVM_REG_ARM_32_CRN_SHIFT 11 +/* + * For KVM currently all guest registers are nonsecure, but we reserve a bit + * in the encoding to distinguish secure from nonsecure for AArch32 system + * registers that are banked by security. 
This is 1 for the secure banked + * register, and 0 for the nonsecure banked register or if the register is + * not banked by security. + */ +#define KVM_REG_ARM_SECURE_MASK 0x0000000010000000 +#define KVM_REG_ARM_SECURE_SHIFT 28 #define ARM_CP15_REG_SHIFT_MASK(x,n) \ (((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK) @@ -186,6 +195,12 @@ struct kvm_arch_memory_slot { #define KVM_REG_ARM_VFP_FPINST 0x1009 #define KVM_REG_ARM_VFP_FPINST2 0x100A +/* KVM-as-firmware specific pseudo-registers */ +#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT) +#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \ + KVM_REG_ARM_FW | ((r) & 0xffff)) +#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0) + /* Device Control API: ARM VGIC */ #define KVM_DEV_ARM_VGIC_GRP_ADDR 0 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h index 9abbf3044654..04b3256f8e6d 100644 --- a/tools/arch/arm64/include/uapi/asm/kvm.h +++ b/tools/arch/arm64/include/uapi/asm/kvm.h @@ -206,6 +206,12 @@ struct kvm_arch_memory_slot { #define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2) #define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2) +/* KVM-as-firmware specific pseudo-registers */ +#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT) +#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \ + KVM_REG_ARM_FW | ((r) & 0xffff)) +#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0) + /* Device Control API: ARM VGIC */ #define KVM_DEV_ARM_VGIC_GRP_ADDR 0 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index d554c11e01ff..578793e97431 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -320,6 +320,7 @@ #define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ #define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ #define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ +#define X86_FEATURE_CLDEMOTE (16*32+25) /* CLDEMOTE instruction */ /* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */ #define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */ diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h index fb3a6de7440b..6847d85400a8 100644 --- a/tools/arch/x86/include/asm/required-features.h +++ b/tools/arch/x86/include/asm/required-features.h @@ -53,12 +53,6 @@ # define NEED_MOVBE 0 #endif -#ifdef CONFIG_X86_5LEVEL -# define NEED_LA57 (1<<(X86_FEATURE_LA57 & 31)) -#else -# define NEED_LA57 0 -#endif - #ifdef CONFIG_X86_64 #ifdef CONFIG_PARAVIRT /* Paravirtualized systems may not have PSE or PGE available */ @@ -104,7 +98,7 @@ #define REQUIRED_MASK13 0 #define REQUIRED_MASK14 0 #define REQUIRED_MASK15 0 -#define REQUIRED_MASK16 (NEED_LA57) +#define REQUIRED_MASK16 0 #define REQUIRED_MASK17 0 #define REQUIRED_MASK18 0 #define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19) diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h index f3a960488eae..c535c2fdea13 100644 --- a/tools/arch/x86/include/uapi/asm/kvm.h +++ b/tools/arch/x86/include/uapi/asm/kvm.h @@ -354,8 +354,25 @@ struct kvm_xcrs { __u64 padding[16]; }; -/* definition of registers in kvm_run */ +#define KVM_SYNC_X86_REGS (1UL << 0) +#define KVM_SYNC_X86_SREGS (1UL << 1) +#define KVM_SYNC_X86_EVENTS (1UL << 2) + 
+#define KVM_SYNC_X86_VALID_FIELDS \ + (KVM_SYNC_X86_REGS| \ + KVM_SYNC_X86_SREGS| \ + KVM_SYNC_X86_EVENTS) + +/* kvm_sync_regs struct included by kvm_run struct */ struct kvm_sync_regs { + /* Members of this structure are potentially malicious. + * Care must be taken by code reading, esp. interpreting, + * data fields from them inside KVM to prevent TOCTOU and + * double-fetch types of vulnerabilities. + */ + struct kvm_regs regs; + struct kvm_sregs sregs; + struct kvm_vcpu_events events; }; #define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0) diff --git a/tools/bpf/Makefile b/tools/bpf/Makefile index 1ea545965ee3..53b60ad452f5 100644 --- a/tools/bpf/Makefile +++ b/tools/bpf/Makefile @@ -76,6 +76,8 @@ $(OUTPUT)bpf_asm: $(OUTPUT)bpf_asm.o $(OUTPUT)bpf_exp.yacc.o $(OUTPUT)bpf_exp.le $(QUIET_LINK)$(CC) $(CFLAGS) -o $@ $^ $(OUTPUT)bpf_exp.lex.c: $(OUTPUT)bpf_exp.yacc.c +$(OUTPUT)bpf_exp.yacc.o: $(OUTPUT)bpf_exp.yacc.c +$(OUTPUT)bpf_exp.lex.o: $(OUTPUT)bpf_exp.lex.c clean: bpftool_clean $(call QUIET_CLEAN, bpf-progs) diff --git a/tools/bpf/bpf_dbg.c b/tools/bpf/bpf_dbg.c index 4f254bcc4423..61b9aa5d6415 100644 --- a/tools/bpf/bpf_dbg.c +++ b/tools/bpf/bpf_dbg.c @@ -1063,7 +1063,7 @@ static int cmd_load_pcap(char *file) static int cmd_load(char *arg) { - char *subcmd, *cont, *tmp = strdup(arg); + char *subcmd, *cont = NULL, *tmp = strdup(arg); int ret = CMD_OK; subcmd = strtok_r(tmp, " ", &cont); @@ -1073,7 +1073,10 @@ static int cmd_load(char *arg) bpf_reset(); bpf_reset_breakpoints(); - ret = cmd_load_bpf(cont); + if (!cont) + ret = CMD_ERR; + else + ret = cmd_load_bpf(cont); } else if (matches(subcmd, "pcap") == 0) { ret = cmd_load_pcap(cont); } else { diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h index 04e32f965ad7..1827c2f973f9 100644 --- a/tools/include/linux/compiler.h +++ b/tools/include/linux/compiler.h @@ -151,11 +151,21 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s * required ordering. */ -#define READ_ONCE(x) \ - ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) - -#define WRITE_ONCE(x, val) \ - ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) +#define READ_ONCE(x) \ +({ \ + union { typeof(x) __val; char __c[1]; } __u = \ + { .__c = { 0 } }; \ + __read_once_size(&(x), __u.__c, sizeof(x)); \ + __u.__val; \ +}) + +#define WRITE_ONCE(x, val) \ +({ \ + union { typeof(x) __val; char __c[1]; } __u = \ + { .__val = (val) }; \ + __write_once_size(&(x), __u.__c, sizeof(x)); \ + __u.__val; \ +}) #ifndef __fallthrough diff --git a/tools/include/linux/coresight-pmu.h b/tools/include/linux/coresight-pmu.h index edfeaba95429..a1a959ba24ff 100644 --- a/tools/include/linux/coresight-pmu.h +++ b/tools/include/linux/coresight-pmu.h @@ -1,18 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright(C) 2015 Linaro Limited. All rights reserved. * Author: Mathieu Poirier <mathieu.poirier@linaro.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. */ #ifndef _LINUX_CORESIGHT_PMU_H diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h index f8b134f5608f..e7ee32861d51 100644 --- a/tools/include/uapi/asm-generic/mman-common.h +++ b/tools/include/uapi/asm-generic/mman-common.h @@ -27,6 +27,9 @@ # define MAP_UNINITIALIZED 0x0 /* Don't support this flag */ #endif +/* 0x0100 - 0x80000 flags are defined in asm-generic/mman.h */ +#define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */ + /* * Flags for mlock */ diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 9d07465023a2..c5ec89732a8d 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -864,6 +864,7 @@ enum bpf_func_id { /* BPF_FUNC_skb_set_tunnel_key flags. */ #define BPF_F_ZERO_CSUM_TX (1ULL << 1) #define BPF_F_DONT_FRAGMENT (1ULL << 2) +#define BPF_F_SEQ_NUMBER (1ULL << 3) /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and * BPF_FUNC_perf_event_read_value flags. diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h index 6d9447700e18..68699f654118 100644 --- a/tools/include/uapi/linux/if_link.h +++ b/tools/include/uapi/linux/if_link.h @@ -941,4 +941,43 @@ enum { IFLA_EVENT_BONDING_OPTIONS, /* change in bonding options */ }; +/* tun section */ + +enum { + IFLA_TUN_UNSPEC, + IFLA_TUN_OWNER, + IFLA_TUN_GROUP, + IFLA_TUN_TYPE, + IFLA_TUN_PI, + IFLA_TUN_VNET_HDR, + IFLA_TUN_PERSIST, + IFLA_TUN_MULTI_QUEUE, + IFLA_TUN_NUM_QUEUES, + IFLA_TUN_NUM_DISABLED_QUEUES, + __IFLA_TUN_MAX, +}; + +#define IFLA_TUN_MAX (__IFLA_TUN_MAX - 1) + +/* rmnet section */ + +#define RMNET_FLAGS_INGRESS_DEAGGREGATION (1U << 0) +#define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1) +#define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2) +#define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3) + +enum { + IFLA_RMNET_UNSPEC, + IFLA_RMNET_MUX_ID, + IFLA_RMNET_FLAGS, + __IFLA_RMNET_MAX, +}; + +#define IFLA_RMNET_MAX (__IFLA_RMNET_MAX - 1) + +struct ifla_rmnet_flags { + __u32 flags; + __u32 mask; +}; + #endif /* _UAPI_LINUX_IF_LINK_H */ diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index 6b89f87db200..b02c41e53d56 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h @@ -396,6 +396,10 @@ struct kvm_run { char padding[256]; }; + /* 2048 is the size of the char array used to bound/pad the size + * of the union that holds sync regs. + */ + #define SYNC_REGS_SIZE_BYTES 2048 /* * shared registers between kvm and userspace. 
* kvm_valid_regs specifies the register classes set by the host @@ -407,7 +411,7 @@ struct kvm_run { __u64 kvm_dirty_regs; union { struct kvm_sync_regs regs; - char padding[2048]; + char padding[SYNC_REGS_SIZE_BYTES]; } s; }; @@ -672,6 +676,13 @@ struct kvm_ioeventfd { __u8 pad[36]; }; +#define KVM_X86_DISABLE_EXITS_MWAIT (1 << 0) +#define KVM_X86_DISABLE_EXITS_HTL (1 << 1) +#define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2) +#define KVM_X86_DISABLE_VALID_EXITS (KVM_X86_DISABLE_EXITS_MWAIT | \ + KVM_X86_DISABLE_EXITS_HTL | \ + KVM_X86_DISABLE_EXITS_PAUSE) + /* for KVM_ENABLE_CAP */ struct kvm_enable_cap { /* in */ @@ -936,6 +947,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_PPC_GET_CPU_CHAR 151 #define KVM_CAP_S390_BPB 152 #define KVM_CAP_GET_MSR_FEATURES 153 +#define KVM_CAP_HYPERV_EVENTFD 154 #ifdef KVM_CAP_IRQ_ROUTING @@ -1375,6 +1387,10 @@ struct kvm_enc_region { #define KVM_MEMORY_ENCRYPT_REG_REGION _IOR(KVMIO, 0xbb, struct kvm_enc_region) #define KVM_MEMORY_ENCRYPT_UNREG_REGION _IOR(KVMIO, 0xbc, struct kvm_enc_region) +/* Available with KVM_CAP_HYPERV_EVENTFD */ +#define KVM_HYPERV_EVENTFD _IOW(KVMIO, 0xbd, struct kvm_hyperv_eventfd) + + /* Secure Encrypted Virtualization command */ enum sev_cmd_id { /* Guest initialization commands */ @@ -1515,4 +1531,14 @@ struct kvm_assigned_msix_entry { #define KVM_ARM_DEV_EL1_PTIMER (1 << 1) #define KVM_ARM_DEV_PMU (1 << 2) +struct kvm_hyperv_eventfd { + __u32 conn_id; + __s32 fd; + __u32 flags; + __u32 padding[3]; +}; + +#define KVM_HYPERV_CONN_ID_MASK 0x00ffffff +#define KVM_HYPERV_EVENTFD_DEASSIGN (1 << 0) + #endif /* __LINUX_KVM_H */ diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h index 912b85b52344..b8e288a1f740 100644 --- a/tools/include/uapi/linux/perf_event.h +++ b/tools/include/uapi/linux/perf_event.h @@ -650,11 +650,23 @@ struct perf_event_mmap_page { #define PERF_RECORD_MISC_COMM_EXEC (1 << 13) #define PERF_RECORD_MISC_SWITCH_OUT (1 << 13) /* - * Indicates that the content of PERF_SAMPLE_IP points to - * the actual instruction that triggered the event. See also - * perf_event_attr::precise_ip. + * These PERF_RECORD_MISC_* flags below are safely reused + * for the following events: + * + * PERF_RECORD_MISC_EXACT_IP - PERF_RECORD_SAMPLE of precise events + * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events + * + * + * PERF_RECORD_MISC_EXACT_IP: + * Indicates that the content of PERF_SAMPLE_IP points to + * the actual instruction that triggered the event. See also + * perf_event_attr::precise_ip. + * + * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT: + * Indicates that thread was preempted in TASK_RUNNING state. 
*/ #define PERF_RECORD_MISC_EXACT_IP (1 << 14) +#define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT (1 << 14) /* * Reserve the last bit to indicate some extended misc field */ diff --git a/tools/include/uapi/sound/asound.h b/tools/include/uapi/sound/asound.h index 07d61583fd02..ed0a120d4f08 100644 --- a/tools/include/uapi/sound/asound.h +++ b/tools/include/uapi/sound/asound.h @@ -242,6 +242,7 @@ typedef int __bitwise snd_pcm_format_t; #define SNDRV_PCM_FORMAT_DSD_U16_BE ((__force snd_pcm_format_t) 51) /* DSD, 2-byte samples DSD (x16), big endian */ #define SNDRV_PCM_FORMAT_DSD_U32_BE ((__force snd_pcm_format_t) 52) /* DSD, 4-byte samples DSD (x32), big endian */ #define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U32_BE +#define SNDRV_PCM_FORMAT_FIRST SNDRV_PCM_FORMAT_S8 #ifdef SNDRV_LITTLE_ENDIAN #define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_LE diff --git a/tools/lib/subcmd/parse-options.c b/tools/lib/subcmd/parse-options.c index f6a1babcbac4..cb7154eccbdc 100644 --- a/tools/lib/subcmd/parse-options.c +++ b/tools/lib/subcmd/parse-options.c @@ -433,7 +433,7 @@ match: if (ambiguous_option) { fprintf(stderr, - " Error: Ambiguous option: %s (could be --%s%s or --%s%s)", + " Error: Ambiguous option: %s (could be --%s%s or --%s%s)\n", arg, (ambiguous_flags & OPT_UNSET) ? "no-" : "", ambiguous_option->long_name, @@ -458,7 +458,7 @@ static void check_typos(const char *arg, const struct option *options) return; if (strstarts(arg, "no-")) { - fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)", arg); + fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)\n", arg); exit(129); } @@ -466,7 +466,7 @@ static void check_typos(const char *arg, const struct option *options) if (!options->long_name) continue; if (strstarts(options->long_name, arg)) { - fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)", arg); + fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)\n", arg); exit(129); } } diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index 8ae824dbfca3..f76d9914686a 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile @@ -31,8 +31,8 @@ INCLUDES := -I$(srctree)/tools/include \ -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \ -I$(srctree)/tools/objtool/arch/$(ARCH)/include WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed -CFLAGS += -Wall -Werror $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES) -LDFLAGS += -lelf $(LIBSUBCMD) +CFLAGS += -Werror $(WARNINGS) $(HOSTCFLAGS) -g $(INCLUDES) +LDFLAGS += -lelf $(LIBSUBCMD) $(HOSTLDFLAGS) # Allow old libelf to be used: elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr) diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt index 5b4fff3adc4b..32f4a898e3f2 100644 --- a/tools/perf/Documentation/perf-config.txt +++ b/tools/perf/Documentation/perf-config.txt @@ -334,6 +334,11 @@ annotate.*:: 99.93 │ mov %eax,%eax + annotate.offset_level:: + Default is '1', meaning just jump targets will have offsets show right beside + the instruction. When set to '2' 'call' instructions will also have its offsets + shown, 3 or higher will show offsets for all instructions. 
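A few files back in this run, the asound.h hunk pairs the existing SNDRV_PCM_FORMAT_LAST with a new SNDRV_PCM_FORMAT_FIRST, so callers can walk the whole format range without hard-coding its lower bound. A standalone sketch of that first/last-sentinel idiom, with stand-in values and a hypothetical probe function instead of the real uapi constants:

#include <stdio.h>

/* Stand-in bounds; the real values come from uapi/sound/asound.h. */
#define PCM_FORMAT_FIRST 0
#define PCM_FORMAT_LAST  52

/* Hypothetical per-format probe; a driver would test hardware support here. */
static int format_is_supported(int fmt)
{
	return fmt % 2 == 0;	/* arbitrary stand-in result */
}

int main(void)
{
	int supported = 0;

	/* Iterate every defined format using the two sentinels as loop bounds. */
	for (int fmt = PCM_FORMAT_FIRST; fmt <= PCM_FORMAT_LAST; fmt++)
		supported += format_is_supported(fmt);

	printf("%d of %d formats reported as supported\n",
	       supported, PCM_FORMAT_LAST - PCM_FORMAT_FIRST + 1);
	return 0;
}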
+ hist.*:: hist.percentage:: This option control the way to calculate overhead of filtered entries - diff --git a/tools/perf/Documentation/perf-mem.txt b/tools/perf/Documentation/perf-mem.txt index b0211410969b..f8d2167cf3e7 100644 --- a/tools/perf/Documentation/perf-mem.txt +++ b/tools/perf/Documentation/perf-mem.txt @@ -28,29 +28,46 @@ OPTIONS <command>...:: Any command you can specify in a shell. +-i:: +--input=<file>:: + Input file name. + -f:: --force:: Don't do ownership validation -t:: ---type=:: +--type=<type>:: Select the memory operation type: load or store (default: load,store) -D:: ---dump-raw-samples=:: +--dump-raw-samples:: Dump the raw decoded samples on the screen in a format that is easy to parse with one sample per line. -x:: ---field-separator:: +--field-separator=<separator>:: Specify the field separator used when dump raw samples (-D option). By default, The separator is the space character. -C:: ---cpu-list:: - Restrict dump of raw samples to those provided via this option. Note that the same - option can be passed in record mode. It will be interpreted the same way as perf - record. +--cpu=<cpu>:: + Monitor only on the list of CPUs provided. Multiple CPUs can be provided as a + comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-2. Default + is to monitor all CPUS. +-U:: +--hide-unresolved:: + Only display entries resolved to a symbol. + +-p:: +--phys-data:: + Record/Report sample physical addresses + +RECORD OPTIONS +-------------- +-e:: +--event <event>:: + Event selector. Use 'perf mem record -e list' to list available events. -K:: --all-kernel:: @@ -60,12 +77,15 @@ OPTIONS --all-user:: Configure all used events to run in user space. ---ldload:: +-v:: +--verbose:: + Be more verbose (show counter open errors, etc) + +--ldlat <n>:: Specify desired latency for loads event. --p:: ---phys-data:: - Record/Report sample physical addresses +In addition, for report all perf report options are valid, and for record +all perf record options. SEE ALSO -------- diff --git a/tools/perf/Documentation/perf-sched.txt b/tools/perf/Documentation/perf-sched.txt index bb33601a823b..63f938b887dd 100644 --- a/tools/perf/Documentation/perf-sched.txt +++ b/tools/perf/Documentation/perf-sched.txt @@ -104,8 +104,8 @@ OPTIONS for 'perf sched timehist' kallsyms pathname -g:: ---no-call-graph:: - Do not display call chains if present. +--call-graph:: + Display call chains if present (default on). --max-stack:: Maximum number of functions to display in backtrace, default 5. diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt index 36ec0257f8d3..afdafe2110a1 100644 --- a/tools/perf/Documentation/perf-script.txt +++ b/tools/perf/Documentation/perf-script.txt @@ -228,14 +228,15 @@ OPTIONS For sample events it's possible to display misc field with -F +misc option, following letters are displayed for each bit: - PERF_RECORD_MISC_KERNEL K - PERF_RECORD_MISC_USER U - PERF_RECORD_MISC_HYPERVISOR H - PERF_RECORD_MISC_GUEST_KERNEL G - PERF_RECORD_MISC_GUEST_USER g - PERF_RECORD_MISC_MMAP_DATA* M - PERF_RECORD_MISC_COMM_EXEC E - PERF_RECORD_MISC_SWITCH_OUT S + PERF_RECORD_MISC_KERNEL K + PERF_RECORD_MISC_USER U + PERF_RECORD_MISC_HYPERVISOR H + PERF_RECORD_MISC_GUEST_KERNEL G + PERF_RECORD_MISC_GUEST_USER g + PERF_RECORD_MISC_MMAP_DATA* M + PERF_RECORD_MISC_COMM_EXEC E + PERF_RECORD_MISC_SWITCH_OUT S + PERF_RECORD_MISC_SWITCH_OUT_PREEMPT Sp $ perf script -F +misc ... sched-messaging 1414 K 28690.636582: 4590 cycles ... 
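The perf_event.h comment rewrite earlier in this run spells out that bit 14 of the misc field is reused: it means PERF_RECORD_MISC_EXACT_IP on sample records and PERF_RECORD_MISC_SWITCH_OUT_PREEMPT on context-switch records, which is also why the perf-script documentation above gains the extra 'Sp' letter. A small sketch of how a consumer must branch on the record type before interpreting that bit; the record-type values here are stand-ins, only the bit position is taken from the hunk:

#include <stdio.h>

/* Stand-in record types; the real enum lives in uapi/linux/perf_event.h. */
#define REC_SAMPLE 1
#define REC_SWITCH 2

/* Bit 14 of the misc field is shared between the two meanings. */
#define MISC_BIT14 (1u << 14)

static const char *decode_bit14(int record_type, unsigned int misc)
{
	if (!(misc & MISC_BIT14))
		return "bit 14 clear";

	/* Same bit, different meaning depending on the record carrying it. */
	return record_type == REC_SAMPLE ? "EXACT_IP (precise sample IP)"
					 : "SWITCH_OUT_PREEMPT (preempted while TASK_RUNNING)";
}

int main(void)
{
	printf("sample record: %s\n", decode_bit14(REC_SAMPLE, MISC_BIT14));
	printf("switch record: %s\n", decode_bit14(REC_SWITCH, MISC_BIT14));
	return 0;
}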
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt index f15b306be183..e6c3b4e555c2 100644 --- a/tools/perf/Documentation/perf-stat.txt +++ b/tools/perf/Documentation/perf-stat.txt @@ -153,7 +153,7 @@ perf stat --repeat 10 --null --sync --pre 'make -s O=defconfig-build/clean' -- m -I msecs:: --interval-print msecs:: -Print count deltas every N milliseconds (minimum: 10ms) +Print count deltas every N milliseconds (minimum: 1ms) The overhead percentage could be high in some cases, for instance with small, sub 100ms intervals. Use with caution. example: 'perf stat -I 1000 -e cycles -a sleep 5' diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index c7abd83a8e19..ae7dc46e8f8a 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -68,7 +68,7 @@ ifeq ($(NO_PERF_REGS),0) endif ifneq ($(NO_SYSCALL_TABLE),1) - CFLAGS += -DHAVE_SYSCALL_TABLE + CFLAGS += -DHAVE_SYSCALL_TABLE_SUPPORT endif # So far there's only x86 and arm libdw unwind support merged in perf. @@ -847,7 +847,7 @@ ifndef NO_JVMTI ifeq ($(feature-jvmti), 1) $(call detected_var,JDIR) else - $(warning No openjdk development package found, please install JDK package) + $(warning No openjdk development package found, please install JDK package, e.g. openjdk-8-jdk, java-1.8.0-openjdk-devel) NO_JVMTI := 1 endif endif diff --git a/tools/perf/arch/arm/include/arch-tests.h b/tools/perf/arch/arm/include/arch-tests.h new file mode 100644 index 000000000000..90ec4c8cb880 --- /dev/null +++ b/tools/perf/arch/arm/include/arch-tests.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ARCH_TESTS_H +#define ARCH_TESTS_H + +#ifdef HAVE_DWARF_UNWIND_SUPPORT +struct thread; +struct perf_sample; +#endif + +extern struct test arch_tests[]; + +#endif diff --git a/tools/perf/arch/arm/tests/Build b/tools/perf/arch/arm/tests/Build index b30eff9bcc83..883c57ff0c08 100644 --- a/tools/perf/arch/arm/tests/Build +++ b/tools/perf/arch/arm/tests/Build @@ -1,2 +1,4 @@ libperf-y += regs_load.o libperf-y += dwarf-unwind.o + +libperf-y += arch-tests.o diff --git a/tools/perf/arch/arm/tests/arch-tests.c b/tools/perf/arch/arm/tests/arch-tests.c new file mode 100644 index 000000000000..5b1543c98022 --- /dev/null +++ b/tools/perf/arch/arm/tests/arch-tests.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <string.h> +#include "tests/tests.h" +#include "arch-tests.h" + +struct test arch_tests[] = { +#ifdef HAVE_DWARF_UNWIND_SUPPORT + { + .desc = "DWARF unwind", + .func = test__dwarf_unwind, + }, +#endif + { + .func = NULL, + }, +}; diff --git a/tools/perf/arch/arm/util/auxtrace.c b/tools/perf/arch/arm/util/auxtrace.c index fa639e3e52ac..1ce6bdbda561 100644 --- a/tools/perf/arch/arm/util/auxtrace.c +++ b/tools/perf/arch/arm/util/auxtrace.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright(C) 2015 Linaro Limited. All rights reserved. * Author: Mathieu Poirier <mathieu.poirier@linaro.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. 
If not, see <http://www.gnu.org/licenses/>. */ #include <stdbool.h> diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c index 5c655ad4621e..2f595cd73da6 100644 --- a/tools/perf/arch/arm/util/cs-etm.c +++ b/tools/perf/arch/arm/util/cs-etm.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright(C) 2015 Linaro Limited. All rights reserved. * Author: Mathieu Poirier <mathieu.poirier@linaro.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. */ #include <api/fs/fs.h> diff --git a/tools/perf/arch/arm/util/cs-etm.h b/tools/perf/arch/arm/util/cs-etm.h index 5256741be549..1a12e64f5127 100644 --- a/tools/perf/arch/arm/util/cs-etm.h +++ b/tools/perf/arch/arm/util/cs-etm.h @@ -1,18 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright(C) 2015 Linaro Limited. All rights reserved. * Author: Mathieu Poirier <mathieu.poirier@linaro.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. */ #ifndef INCLUDE__PERF_CS_ETM_H__ diff --git a/tools/perf/arch/arm/util/pmu.c b/tools/perf/arch/arm/util/pmu.c index ac4dffc807b8..e047571e6080 100644 --- a/tools/perf/arch/arm/util/pmu.c +++ b/tools/perf/arch/arm/util/pmu.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright(C) 2015 Linaro Limited. All rights reserved. * Author: Mathieu Poirier <mathieu.poirier@linaro.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. 
*/ #include <string.h> diff --git a/tools/perf/arch/s390/util/auxtrace.c b/tools/perf/arch/s390/util/auxtrace.c index 6cb48e4cffd9..3afe8256eff2 100644 --- a/tools/perf/arch/s390/util/auxtrace.c +++ b/tools/perf/arch/s390/util/auxtrace.c @@ -87,6 +87,7 @@ struct auxtrace_record *auxtrace_record__init(struct perf_evlist *evlist, struct perf_evsel *pos; int diagnose = 0; + *err = 0; if (evlist->nr_entries == 0) return NULL; diff --git a/tools/perf/arch/s390/util/header.c b/tools/perf/arch/s390/util/header.c index a4c30f1c70be..163b92f33998 100644 --- a/tools/perf/arch/s390/util/header.c +++ b/tools/perf/arch/s390/util/header.c @@ -146,21 +146,3 @@ char *get_cpuid_str(struct perf_pmu *pmu __maybe_unused) zfree(&buf); return buf; } - -/* - * Compare the cpuid string returned by get_cpuid() function - * with the name generated by the jevents file read from - * pmu-events/arch/s390/mapfile.csv. - * - * Parameter mapcpuid is the cpuid as stored in the - * pmu-events/arch/s390/mapfile.csv. This is just the type number. - * Parameter cpuid is the cpuid returned by function get_cpuid(). - */ -int strcmp_cpuid_str(const char *mapcpuid, const char *cpuid) -{ - char *cp = strchr(cpuid, ','); - - if (cp == NULL) - return -1; - return strncmp(cp + 1, mapcpuid, strlen(mapcpuid)); -} diff --git a/tools/perf/arch/x86/Makefile b/tools/perf/arch/x86/Makefile index d74eaa7aa927..1a38e78117ce 100644 --- a/tools/perf/arch/x86/Makefile +++ b/tools/perf/arch/x86/Makefile @@ -21,7 +21,7 @@ _dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') $(header): $(sys)/syscall_64.tbl $(systbl) @(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \ (diff -B arch/x86/entry/syscalls/syscall_64.tbl ../../arch/x86/entry/syscalls/syscall_64.tbl >/dev/null) \ - || echo "Warning: Kernel ABI header at 'tools/arch/x86/entry/syscalls/syscall_64.tbl' differs from latest version at 'arch/x86/entry/syscalls/syscall_64.tbl'" >&2 )) || true + || echo "Warning: Kernel ABI header at 'tools/perf/arch/x86/entry/syscalls/syscall_64.tbl' differs from latest version at 'arch/x86/entry/syscalls/syscall_64.tbl'" >&2 )) || true $(Q)$(SHELL) '$(systbl)' $(sys)/syscall_64.tbl 'x86_64' > $@ clean:: diff --git a/tools/perf/arch/x86/annotate/instructions.c b/tools/perf/arch/x86/annotate/instructions.c index 5bd1ba8c0282..44f5aba78210 100644 --- a/tools/perf/arch/x86/annotate/instructions.c +++ b/tools/perf/arch/x86/annotate/instructions.c @@ -1,21 +1,43 @@ // SPDX-License-Identifier: GPL-2.0 static struct ins x86__instructions[] = { + { .name = "adc", .ops = &mov_ops, }, + { .name = "adcb", .ops = &mov_ops, }, + { .name = "adcl", .ops = &mov_ops, }, { .name = "add", .ops = &mov_ops, }, { .name = "addl", .ops = &mov_ops, }, { .name = "addq", .ops = &mov_ops, }, + { .name = "addsd", .ops = &mov_ops, }, { .name = "addw", .ops = &mov_ops, }, { .name = "and", .ops = &mov_ops, }, + { .name = "andb", .ops = &mov_ops, }, + { .name = "andl", .ops = &mov_ops, }, + { .name = "andpd", .ops = &mov_ops, }, + { .name = "andps", .ops = &mov_ops, }, + { .name = "andq", .ops = &mov_ops, }, + { .name = "andw", .ops = &mov_ops, }, + { .name = "bsr", .ops = &mov_ops, }, + { .name = "bt", .ops = &mov_ops, }, + { .name = "btr", .ops = &mov_ops, }, { .name = "bts", .ops = &mov_ops, }, + { .name = "btsq", .ops = &mov_ops, }, { .name = "call", .ops = &call_ops, }, { .name = "callq", .ops = &call_ops, }, + { .name = "cmovbe", .ops = &mov_ops, }, + { .name = "cmove", .ops = &mov_ops, }, + { .name = "cmovae", .ops = &mov_ops, }, { .name = "cmp", .ops = &mov_ops, }, { .name 
= "cmpb", .ops = &mov_ops, }, { .name = "cmpl", .ops = &mov_ops, }, { .name = "cmpq", .ops = &mov_ops, }, { .name = "cmpw", .ops = &mov_ops, }, { .name = "cmpxch", .ops = &mov_ops, }, + { .name = "cmpxchg", .ops = &mov_ops, }, + { .name = "cs", .ops = &mov_ops, }, { .name = "dec", .ops = &dec_ops, }, { .name = "decl", .ops = &dec_ops, }, + { .name = "divsd", .ops = &mov_ops, }, + { .name = "divss", .ops = &mov_ops, }, + { .name = "gs", .ops = &mov_ops, }, { .name = "imul", .ops = &mov_ops, }, { .name = "inc", .ops = &dec_ops, }, { .name = "incl", .ops = &dec_ops, }, @@ -57,25 +79,68 @@ static struct ins x86__instructions[] = { { .name = "lea", .ops = &mov_ops, }, { .name = "lock", .ops = &lock_ops, }, { .name = "mov", .ops = &mov_ops, }, + { .name = "movapd", .ops = &mov_ops, }, + { .name = "movaps", .ops = &mov_ops, }, { .name = "movb", .ops = &mov_ops, }, { .name = "movdqa", .ops = &mov_ops, }, + { .name = "movdqu", .ops = &mov_ops, }, { .name = "movl", .ops = &mov_ops, }, { .name = "movq", .ops = &mov_ops, }, + { .name = "movsd", .ops = &mov_ops, }, { .name = "movslq", .ops = &mov_ops, }, + { .name = "movss", .ops = &mov_ops, }, + { .name = "movupd", .ops = &mov_ops, }, + { .name = "movups", .ops = &mov_ops, }, + { .name = "movw", .ops = &mov_ops, }, { .name = "movzbl", .ops = &mov_ops, }, { .name = "movzwl", .ops = &mov_ops, }, + { .name = "mulsd", .ops = &mov_ops, }, + { .name = "mulss", .ops = &mov_ops, }, { .name = "nop", .ops = &nop_ops, }, { .name = "nopl", .ops = &nop_ops, }, { .name = "nopw", .ops = &nop_ops, }, { .name = "or", .ops = &mov_ops, }, + { .name = "orb", .ops = &mov_ops, }, { .name = "orl", .ops = &mov_ops, }, + { .name = "orps", .ops = &mov_ops, }, + { .name = "orq", .ops = &mov_ops, }, + { .name = "pand", .ops = &mov_ops, }, + { .name = "paddq", .ops = &mov_ops, }, + { .name = "pcmpeqb", .ops = &mov_ops, }, + { .name = "por", .ops = &mov_ops, }, + { .name = "rclb", .ops = &mov_ops, }, + { .name = "rcll", .ops = &mov_ops, }, + { .name = "retq", .ops = &ret_ops, }, + { .name = "sbb", .ops = &mov_ops, }, + { .name = "sbbl", .ops = &mov_ops, }, + { .name = "sete", .ops = &mov_ops, }, + { .name = "sub", .ops = &mov_ops, }, + { .name = "subl", .ops = &mov_ops, }, + { .name = "subq", .ops = &mov_ops, }, + { .name = "subsd", .ops = &mov_ops, }, + { .name = "subw", .ops = &mov_ops, }, { .name = "test", .ops = &mov_ops, }, { .name = "testb", .ops = &mov_ops, }, { .name = "testl", .ops = &mov_ops, }, + { .name = "ucomisd", .ops = &mov_ops, }, + { .name = "ucomiss", .ops = &mov_ops, }, + { .name = "vaddsd", .ops = &mov_ops, }, + { .name = "vandpd", .ops = &mov_ops, }, + { .name = "vmovdqa", .ops = &mov_ops, }, + { .name = "vmovq", .ops = &mov_ops, }, + { .name = "vmovsd", .ops = &mov_ops, }, + { .name = "vmulsd", .ops = &mov_ops, }, + { .name = "vorpd", .ops = &mov_ops, }, + { .name = "vsubsd", .ops = &mov_ops, }, + { .name = "vucomisd", .ops = &mov_ops, }, { .name = "xadd", .ops = &mov_ops, }, { .name = "xbeginl", .ops = &jump_ops, }, { .name = "xbeginq", .ops = &jump_ops, }, - { .name = "retq", .ops = &ret_ops, }, + { .name = "xchg", .ops = &mov_ops, }, + { .name = "xor", .ops = &mov_ops, }, + { .name = "xorb", .ops = &mov_ops, }, + { .name = "xorpd", .ops = &mov_ops, }, + { .name = "xorps", .ops = &mov_ops, }, }; static bool x86__ins_is_fused(struct arch *arch, const char *ins1, diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl index 5aef183e2f85..4dfe42666d0c 100644 --- 
a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl +++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl @@ -4,379 +4,383 @@ # The format is: # <number> <abi> <name> <entry point> # +# The __x64_sys_*() stubs are created on-the-fly for sys_*() system calls +# # The abi is "common", "64" or "x32" for this file. # -0 common read sys_read -1 common write sys_write -2 common open sys_open -3 common close sys_close -4 common stat sys_newstat -5 common fstat sys_newfstat -6 common lstat sys_newlstat -7 common poll sys_poll -8 common lseek sys_lseek -9 common mmap sys_mmap -10 common mprotect sys_mprotect -11 common munmap sys_munmap -12 common brk sys_brk -13 64 rt_sigaction sys_rt_sigaction -14 common rt_sigprocmask sys_rt_sigprocmask -15 64 rt_sigreturn sys_rt_sigreturn/ptregs -16 64 ioctl sys_ioctl -17 common pread64 sys_pread64 -18 common pwrite64 sys_pwrite64 -19 64 readv sys_readv -20 64 writev sys_writev -21 common access sys_access -22 common pipe sys_pipe -23 common select sys_select -24 common sched_yield sys_sched_yield -25 common mremap sys_mremap -26 common msync sys_msync -27 common mincore sys_mincore -28 common madvise sys_madvise -29 common shmget sys_shmget -30 common shmat sys_shmat -31 common shmctl sys_shmctl -32 common dup sys_dup -33 common dup2 sys_dup2 -34 common pause sys_pause -35 common nanosleep sys_nanosleep -36 common getitimer sys_getitimer -37 common alarm sys_alarm -38 common setitimer sys_setitimer -39 common getpid sys_getpid -40 common sendfile sys_sendfile64 -41 common socket sys_socket -42 common connect sys_connect -43 common accept sys_accept -44 common sendto sys_sendto -45 64 recvfrom sys_recvfrom -46 64 sendmsg sys_sendmsg -47 64 recvmsg sys_recvmsg -48 common shutdown sys_shutdown -49 common bind sys_bind -50 common listen sys_listen -51 common getsockname sys_getsockname -52 common getpeername sys_getpeername -53 common socketpair sys_socketpair -54 64 setsockopt sys_setsockopt -55 64 getsockopt sys_getsockopt -56 common clone sys_clone/ptregs -57 common fork sys_fork/ptregs -58 common vfork sys_vfork/ptregs -59 64 execve sys_execve/ptregs -60 common exit sys_exit -61 common wait4 sys_wait4 -62 common kill sys_kill -63 common uname sys_newuname -64 common semget sys_semget -65 common semop sys_semop -66 common semctl sys_semctl -67 common shmdt sys_shmdt -68 common msgget sys_msgget -69 common msgsnd sys_msgsnd -70 common msgrcv sys_msgrcv -71 common msgctl sys_msgctl -72 common fcntl sys_fcntl -73 common flock sys_flock -74 common fsync sys_fsync -75 common fdatasync sys_fdatasync -76 common truncate sys_truncate -77 common ftruncate sys_ftruncate -78 common getdents sys_getdents -79 common getcwd sys_getcwd -80 common chdir sys_chdir -81 common fchdir sys_fchdir -82 common rename sys_rename -83 common mkdir sys_mkdir -84 common rmdir sys_rmdir -85 common creat sys_creat -86 common link sys_link -87 common unlink sys_unlink -88 common symlink sys_symlink -89 common readlink sys_readlink -90 common chmod sys_chmod -91 common fchmod sys_fchmod -92 common chown sys_chown -93 common fchown sys_fchown -94 common lchown sys_lchown -95 common umask sys_umask -96 common gettimeofday sys_gettimeofday -97 common getrlimit sys_getrlimit -98 common getrusage sys_getrusage -99 common sysinfo sys_sysinfo -100 common times sys_times -101 64 ptrace sys_ptrace -102 common getuid sys_getuid -103 common syslog sys_syslog -104 common getgid sys_getgid -105 common setuid sys_setuid -106 common setgid sys_setgid -107 common geteuid sys_geteuid -108 common getegid 
sys_getegid -109 common setpgid sys_setpgid -110 common getppid sys_getppid -111 common getpgrp sys_getpgrp -112 common setsid sys_setsid -113 common setreuid sys_setreuid -114 common setregid sys_setregid -115 common getgroups sys_getgroups -116 common setgroups sys_setgroups -117 common setresuid sys_setresuid -118 common getresuid sys_getresuid -119 common setresgid sys_setresgid -120 common getresgid sys_getresgid -121 common getpgid sys_getpgid -122 common setfsuid sys_setfsuid -123 common setfsgid sys_setfsgid -124 common getsid sys_getsid -125 common capget sys_capget -126 common capset sys_capset -127 64 rt_sigpending sys_rt_sigpending -128 64 rt_sigtimedwait sys_rt_sigtimedwait -129 64 rt_sigqueueinfo sys_rt_sigqueueinfo -130 common rt_sigsuspend sys_rt_sigsuspend -131 64 sigaltstack sys_sigaltstack -132 common utime sys_utime -133 common mknod sys_mknod +0 common read __x64_sys_read +1 common write __x64_sys_write +2 common open __x64_sys_open +3 common close __x64_sys_close +4 common stat __x64_sys_newstat +5 common fstat __x64_sys_newfstat +6 common lstat __x64_sys_newlstat +7 common poll __x64_sys_poll +8 common lseek __x64_sys_lseek +9 common mmap __x64_sys_mmap +10 common mprotect __x64_sys_mprotect +11 common munmap __x64_sys_munmap +12 common brk __x64_sys_brk +13 64 rt_sigaction __x64_sys_rt_sigaction +14 common rt_sigprocmask __x64_sys_rt_sigprocmask +15 64 rt_sigreturn __x64_sys_rt_sigreturn/ptregs +16 64 ioctl __x64_sys_ioctl +17 common pread64 __x64_sys_pread64 +18 common pwrite64 __x64_sys_pwrite64 +19 64 readv __x64_sys_readv +20 64 writev __x64_sys_writev +21 common access __x64_sys_access +22 common pipe __x64_sys_pipe +23 common select __x64_sys_select +24 common sched_yield __x64_sys_sched_yield +25 common mremap __x64_sys_mremap +26 common msync __x64_sys_msync +27 common mincore __x64_sys_mincore +28 common madvise __x64_sys_madvise +29 common shmget __x64_sys_shmget +30 common shmat __x64_sys_shmat +31 common shmctl __x64_sys_shmctl +32 common dup __x64_sys_dup +33 common dup2 __x64_sys_dup2 +34 common pause __x64_sys_pause +35 common nanosleep __x64_sys_nanosleep +36 common getitimer __x64_sys_getitimer +37 common alarm __x64_sys_alarm +38 common setitimer __x64_sys_setitimer +39 common getpid __x64_sys_getpid +40 common sendfile __x64_sys_sendfile64 +41 common socket __x64_sys_socket +42 common connect __x64_sys_connect +43 common accept __x64_sys_accept +44 common sendto __x64_sys_sendto +45 64 recvfrom __x64_sys_recvfrom +46 64 sendmsg __x64_sys_sendmsg +47 64 recvmsg __x64_sys_recvmsg +48 common shutdown __x64_sys_shutdown +49 common bind __x64_sys_bind +50 common listen __x64_sys_listen +51 common getsockname __x64_sys_getsockname +52 common getpeername __x64_sys_getpeername +53 common socketpair __x64_sys_socketpair +54 64 setsockopt __x64_sys_setsockopt +55 64 getsockopt __x64_sys_getsockopt +56 common clone __x64_sys_clone/ptregs +57 common fork __x64_sys_fork/ptregs +58 common vfork __x64_sys_vfork/ptregs +59 64 execve __x64_sys_execve/ptregs +60 common exit __x64_sys_exit +61 common wait4 __x64_sys_wait4 +62 common kill __x64_sys_kill +63 common uname __x64_sys_newuname +64 common semget __x64_sys_semget +65 common semop __x64_sys_semop +66 common semctl __x64_sys_semctl +67 common shmdt __x64_sys_shmdt +68 common msgget __x64_sys_msgget +69 common msgsnd __x64_sys_msgsnd +70 common msgrcv __x64_sys_msgrcv +71 common msgctl __x64_sys_msgctl +72 common fcntl __x64_sys_fcntl +73 common flock __x64_sys_flock +74 common fsync __x64_sys_fsync +75 common 
fdatasync __x64_sys_fdatasync +76 common truncate __x64_sys_truncate +77 common ftruncate __x64_sys_ftruncate +78 common getdents __x64_sys_getdents +79 common getcwd __x64_sys_getcwd +80 common chdir __x64_sys_chdir +81 common fchdir __x64_sys_fchdir +82 common rename __x64_sys_rename +83 common mkdir __x64_sys_mkdir +84 common rmdir __x64_sys_rmdir +85 common creat __x64_sys_creat +86 common link __x64_sys_link +87 common unlink __x64_sys_unlink +88 common symlink __x64_sys_symlink +89 common readlink __x64_sys_readlink +90 common chmod __x64_sys_chmod +91 common fchmod __x64_sys_fchmod +92 common chown __x64_sys_chown +93 common fchown __x64_sys_fchown +94 common lchown __x64_sys_lchown +95 common umask __x64_sys_umask +96 common gettimeofday __x64_sys_gettimeofday +97 common getrlimit __x64_sys_getrlimit +98 common getrusage __x64_sys_getrusage +99 common sysinfo __x64_sys_sysinfo +100 common times __x64_sys_times +101 64 ptrace __x64_sys_ptrace +102 common getuid __x64_sys_getuid +103 common syslog __x64_sys_syslog +104 common getgid __x64_sys_getgid +105 common setuid __x64_sys_setuid +106 common setgid __x64_sys_setgid +107 common geteuid __x64_sys_geteuid +108 common getegid __x64_sys_getegid +109 common setpgid __x64_sys_setpgid +110 common getppid __x64_sys_getppid +111 common getpgrp __x64_sys_getpgrp +112 common setsid __x64_sys_setsid +113 common setreuid __x64_sys_setreuid +114 common setregid __x64_sys_setregid +115 common getgroups __x64_sys_getgroups +116 common setgroups __x64_sys_setgroups +117 common setresuid __x64_sys_setresuid +118 common getresuid __x64_sys_getresuid +119 common setresgid __x64_sys_setresgid +120 common getresgid __x64_sys_getresgid +121 common getpgid __x64_sys_getpgid +122 common setfsuid __x64_sys_setfsuid +123 common setfsgid __x64_sys_setfsgid +124 common getsid __x64_sys_getsid +125 common capget __x64_sys_capget +126 common capset __x64_sys_capset +127 64 rt_sigpending __x64_sys_rt_sigpending +128 64 rt_sigtimedwait __x64_sys_rt_sigtimedwait +129 64 rt_sigqueueinfo __x64_sys_rt_sigqueueinfo +130 common rt_sigsuspend __x64_sys_rt_sigsuspend +131 64 sigaltstack __x64_sys_sigaltstack +132 common utime __x64_sys_utime +133 common mknod __x64_sys_mknod 134 64 uselib -135 common personality sys_personality -136 common ustat sys_ustat -137 common statfs sys_statfs -138 common fstatfs sys_fstatfs -139 common sysfs sys_sysfs -140 common getpriority sys_getpriority -141 common setpriority sys_setpriority -142 common sched_setparam sys_sched_setparam -143 common sched_getparam sys_sched_getparam -144 common sched_setscheduler sys_sched_setscheduler -145 common sched_getscheduler sys_sched_getscheduler -146 common sched_get_priority_max sys_sched_get_priority_max -147 common sched_get_priority_min sys_sched_get_priority_min -148 common sched_rr_get_interval sys_sched_rr_get_interval -149 common mlock sys_mlock -150 common munlock sys_munlock -151 common mlockall sys_mlockall -152 common munlockall sys_munlockall -153 common vhangup sys_vhangup -154 common modify_ldt sys_modify_ldt -155 common pivot_root sys_pivot_root -156 64 _sysctl sys_sysctl -157 common prctl sys_prctl -158 common arch_prctl sys_arch_prctl -159 common adjtimex sys_adjtimex -160 common setrlimit sys_setrlimit -161 common chroot sys_chroot -162 common sync sys_sync -163 common acct sys_acct -164 common settimeofday sys_settimeofday -165 common mount sys_mount -166 common umount2 sys_umount -167 common swapon sys_swapon -168 common swapoff sys_swapoff -169 common reboot sys_reboot -170 
common sethostname sys_sethostname -171 common setdomainname sys_setdomainname -172 common iopl sys_iopl/ptregs -173 common ioperm sys_ioperm +135 common personality __x64_sys_personality +136 common ustat __x64_sys_ustat +137 common statfs __x64_sys_statfs +138 common fstatfs __x64_sys_fstatfs +139 common sysfs __x64_sys_sysfs +140 common getpriority __x64_sys_getpriority +141 common setpriority __x64_sys_setpriority +142 common sched_setparam __x64_sys_sched_setparam +143 common sched_getparam __x64_sys_sched_getparam +144 common sched_setscheduler __x64_sys_sched_setscheduler +145 common sched_getscheduler __x64_sys_sched_getscheduler +146 common sched_get_priority_max __x64_sys_sched_get_priority_max +147 common sched_get_priority_min __x64_sys_sched_get_priority_min +148 common sched_rr_get_interval __x64_sys_sched_rr_get_interval +149 common mlock __x64_sys_mlock +150 common munlock __x64_sys_munlock +151 common mlockall __x64_sys_mlockall +152 common munlockall __x64_sys_munlockall +153 common vhangup __x64_sys_vhangup +154 common modify_ldt __x64_sys_modify_ldt +155 common pivot_root __x64_sys_pivot_root +156 64 _sysctl __x64_sys_sysctl +157 common prctl __x64_sys_prctl +158 common arch_prctl __x64_sys_arch_prctl +159 common adjtimex __x64_sys_adjtimex +160 common setrlimit __x64_sys_setrlimit +161 common chroot __x64_sys_chroot +162 common sync __x64_sys_sync +163 common acct __x64_sys_acct +164 common settimeofday __x64_sys_settimeofday +165 common mount __x64_sys_mount +166 common umount2 __x64_sys_umount +167 common swapon __x64_sys_swapon +168 common swapoff __x64_sys_swapoff +169 common reboot __x64_sys_reboot +170 common sethostname __x64_sys_sethostname +171 common setdomainname __x64_sys_setdomainname +172 common iopl __x64_sys_iopl/ptregs +173 common ioperm __x64_sys_ioperm 174 64 create_module -175 common init_module sys_init_module -176 common delete_module sys_delete_module +175 common init_module __x64_sys_init_module +176 common delete_module __x64_sys_delete_module 177 64 get_kernel_syms 178 64 query_module -179 common quotactl sys_quotactl +179 common quotactl __x64_sys_quotactl 180 64 nfsservctl 181 common getpmsg 182 common putpmsg 183 common afs_syscall 184 common tuxcall 185 common security -186 common gettid sys_gettid -187 common readahead sys_readahead -188 common setxattr sys_setxattr -189 common lsetxattr sys_lsetxattr -190 common fsetxattr sys_fsetxattr -191 common getxattr sys_getxattr -192 common lgetxattr sys_lgetxattr -193 common fgetxattr sys_fgetxattr -194 common listxattr sys_listxattr -195 common llistxattr sys_llistxattr -196 common flistxattr sys_flistxattr -197 common removexattr sys_removexattr -198 common lremovexattr sys_lremovexattr -199 common fremovexattr sys_fremovexattr -200 common tkill sys_tkill -201 common time sys_time -202 common futex sys_futex -203 common sched_setaffinity sys_sched_setaffinity -204 common sched_getaffinity sys_sched_getaffinity +186 common gettid __x64_sys_gettid +187 common readahead __x64_sys_readahead +188 common setxattr __x64_sys_setxattr +189 common lsetxattr __x64_sys_lsetxattr +190 common fsetxattr __x64_sys_fsetxattr +191 common getxattr __x64_sys_getxattr +192 common lgetxattr __x64_sys_lgetxattr +193 common fgetxattr __x64_sys_fgetxattr +194 common listxattr __x64_sys_listxattr +195 common llistxattr __x64_sys_llistxattr +196 common flistxattr __x64_sys_flistxattr +197 common removexattr __x64_sys_removexattr +198 common lremovexattr __x64_sys_lremovexattr +199 common fremovexattr 
__x64_sys_fremovexattr +200 common tkill __x64_sys_tkill +201 common time __x64_sys_time +202 common futex __x64_sys_futex +203 common sched_setaffinity __x64_sys_sched_setaffinity +204 common sched_getaffinity __x64_sys_sched_getaffinity 205 64 set_thread_area -206 64 io_setup sys_io_setup -207 common io_destroy sys_io_destroy -208 common io_getevents sys_io_getevents -209 64 io_submit sys_io_submit -210 common io_cancel sys_io_cancel +206 64 io_setup __x64_sys_io_setup +207 common io_destroy __x64_sys_io_destroy +208 common io_getevents __x64_sys_io_getevents +209 64 io_submit __x64_sys_io_submit +210 common io_cancel __x64_sys_io_cancel 211 64 get_thread_area -212 common lookup_dcookie sys_lookup_dcookie -213 common epoll_create sys_epoll_create +212 common lookup_dcookie __x64_sys_lookup_dcookie +213 common epoll_create __x64_sys_epoll_create 214 64 epoll_ctl_old 215 64 epoll_wait_old -216 common remap_file_pages sys_remap_file_pages -217 common getdents64 sys_getdents64 -218 common set_tid_address sys_set_tid_address -219 common restart_syscall sys_restart_syscall -220 common semtimedop sys_semtimedop -221 common fadvise64 sys_fadvise64 -222 64 timer_create sys_timer_create -223 common timer_settime sys_timer_settime -224 common timer_gettime sys_timer_gettime -225 common timer_getoverrun sys_timer_getoverrun -226 common timer_delete sys_timer_delete -227 common clock_settime sys_clock_settime -228 common clock_gettime sys_clock_gettime -229 common clock_getres sys_clock_getres -230 common clock_nanosleep sys_clock_nanosleep -231 common exit_group sys_exit_group -232 common epoll_wait sys_epoll_wait -233 common epoll_ctl sys_epoll_ctl -234 common tgkill sys_tgkill -235 common utimes sys_utimes +216 common remap_file_pages __x64_sys_remap_file_pages +217 common getdents64 __x64_sys_getdents64 +218 common set_tid_address __x64_sys_set_tid_address +219 common restart_syscall __x64_sys_restart_syscall +220 common semtimedop __x64_sys_semtimedop +221 common fadvise64 __x64_sys_fadvise64 +222 64 timer_create __x64_sys_timer_create +223 common timer_settime __x64_sys_timer_settime +224 common timer_gettime __x64_sys_timer_gettime +225 common timer_getoverrun __x64_sys_timer_getoverrun +226 common timer_delete __x64_sys_timer_delete +227 common clock_settime __x64_sys_clock_settime +228 common clock_gettime __x64_sys_clock_gettime +229 common clock_getres __x64_sys_clock_getres +230 common clock_nanosleep __x64_sys_clock_nanosleep +231 common exit_group __x64_sys_exit_group +232 common epoll_wait __x64_sys_epoll_wait +233 common epoll_ctl __x64_sys_epoll_ctl +234 common tgkill __x64_sys_tgkill +235 common utimes __x64_sys_utimes 236 64 vserver -237 common mbind sys_mbind -238 common set_mempolicy sys_set_mempolicy -239 common get_mempolicy sys_get_mempolicy -240 common mq_open sys_mq_open -241 common mq_unlink sys_mq_unlink -242 common mq_timedsend sys_mq_timedsend -243 common mq_timedreceive sys_mq_timedreceive -244 64 mq_notify sys_mq_notify -245 common mq_getsetattr sys_mq_getsetattr -246 64 kexec_load sys_kexec_load -247 64 waitid sys_waitid -248 common add_key sys_add_key -249 common request_key sys_request_key -250 common keyctl sys_keyctl -251 common ioprio_set sys_ioprio_set -252 common ioprio_get sys_ioprio_get -253 common inotify_init sys_inotify_init -254 common inotify_add_watch sys_inotify_add_watch -255 common inotify_rm_watch sys_inotify_rm_watch -256 common migrate_pages sys_migrate_pages -257 common openat sys_openat -258 common mkdirat sys_mkdirat -259 common mknodat 
sys_mknodat -260 common fchownat sys_fchownat -261 common futimesat sys_futimesat -262 common newfstatat sys_newfstatat -263 common unlinkat sys_unlinkat -264 common renameat sys_renameat -265 common linkat sys_linkat -266 common symlinkat sys_symlinkat -267 common readlinkat sys_readlinkat -268 common fchmodat sys_fchmodat -269 common faccessat sys_faccessat -270 common pselect6 sys_pselect6 -271 common ppoll sys_ppoll -272 common unshare sys_unshare -273 64 set_robust_list sys_set_robust_list -274 64 get_robust_list sys_get_robust_list -275 common splice sys_splice -276 common tee sys_tee -277 common sync_file_range sys_sync_file_range -278 64 vmsplice sys_vmsplice -279 64 move_pages sys_move_pages -280 common utimensat sys_utimensat -281 common epoll_pwait sys_epoll_pwait -282 common signalfd sys_signalfd -283 common timerfd_create sys_timerfd_create -284 common eventfd sys_eventfd -285 common fallocate sys_fallocate -286 common timerfd_settime sys_timerfd_settime -287 common timerfd_gettime sys_timerfd_gettime -288 common accept4 sys_accept4 -289 common signalfd4 sys_signalfd4 -290 common eventfd2 sys_eventfd2 -291 common epoll_create1 sys_epoll_create1 -292 common dup3 sys_dup3 -293 common pipe2 sys_pipe2 -294 common inotify_init1 sys_inotify_init1 -295 64 preadv sys_preadv -296 64 pwritev sys_pwritev -297 64 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo -298 common perf_event_open sys_perf_event_open -299 64 recvmmsg sys_recvmmsg -300 common fanotify_init sys_fanotify_init -301 common fanotify_mark sys_fanotify_mark -302 common prlimit64 sys_prlimit64 -303 common name_to_handle_at sys_name_to_handle_at -304 common open_by_handle_at sys_open_by_handle_at -305 common clock_adjtime sys_clock_adjtime -306 common syncfs sys_syncfs -307 64 sendmmsg sys_sendmmsg -308 common setns sys_setns -309 common getcpu sys_getcpu -310 64 process_vm_readv sys_process_vm_readv -311 64 process_vm_writev sys_process_vm_writev -312 common kcmp sys_kcmp -313 common finit_module sys_finit_module -314 common sched_setattr sys_sched_setattr -315 common sched_getattr sys_sched_getattr -316 common renameat2 sys_renameat2 -317 common seccomp sys_seccomp -318 common getrandom sys_getrandom -319 common memfd_create sys_memfd_create -320 common kexec_file_load sys_kexec_file_load -321 common bpf sys_bpf -322 64 execveat sys_execveat/ptregs -323 common userfaultfd sys_userfaultfd -324 common membarrier sys_membarrier -325 common mlock2 sys_mlock2 -326 common copy_file_range sys_copy_file_range -327 64 preadv2 sys_preadv2 -328 64 pwritev2 sys_pwritev2 -329 common pkey_mprotect sys_pkey_mprotect -330 common pkey_alloc sys_pkey_alloc -331 common pkey_free sys_pkey_free -332 common statx sys_statx +237 common mbind __x64_sys_mbind +238 common set_mempolicy __x64_sys_set_mempolicy +239 common get_mempolicy __x64_sys_get_mempolicy +240 common mq_open __x64_sys_mq_open +241 common mq_unlink __x64_sys_mq_unlink +242 common mq_timedsend __x64_sys_mq_timedsend +243 common mq_timedreceive __x64_sys_mq_timedreceive +244 64 mq_notify __x64_sys_mq_notify +245 common mq_getsetattr __x64_sys_mq_getsetattr +246 64 kexec_load __x64_sys_kexec_load +247 64 waitid __x64_sys_waitid +248 common add_key __x64_sys_add_key +249 common request_key __x64_sys_request_key +250 common keyctl __x64_sys_keyctl +251 common ioprio_set __x64_sys_ioprio_set +252 common ioprio_get __x64_sys_ioprio_get +253 common inotify_init __x64_sys_inotify_init +254 common inotify_add_watch __x64_sys_inotify_add_watch +255 common inotify_rm_watch __x64_sys_inotify_rm_watch 
+256 common migrate_pages __x64_sys_migrate_pages +257 common openat __x64_sys_openat +258 common mkdirat __x64_sys_mkdirat +259 common mknodat __x64_sys_mknodat +260 common fchownat __x64_sys_fchownat +261 common futimesat __x64_sys_futimesat +262 common newfstatat __x64_sys_newfstatat +263 common unlinkat __x64_sys_unlinkat +264 common renameat __x64_sys_renameat +265 common linkat __x64_sys_linkat +266 common symlinkat __x64_sys_symlinkat +267 common readlinkat __x64_sys_readlinkat +268 common fchmodat __x64_sys_fchmodat +269 common faccessat __x64_sys_faccessat +270 common pselect6 __x64_sys_pselect6 +271 common ppoll __x64_sys_ppoll +272 common unshare __x64_sys_unshare +273 64 set_robust_list __x64_sys_set_robust_list +274 64 get_robust_list __x64_sys_get_robust_list +275 common splice __x64_sys_splice +276 common tee __x64_sys_tee +277 common sync_file_range __x64_sys_sync_file_range +278 64 vmsplice __x64_sys_vmsplice +279 64 move_pages __x64_sys_move_pages +280 common utimensat __x64_sys_utimensat +281 common epoll_pwait __x64_sys_epoll_pwait +282 common signalfd __x64_sys_signalfd +283 common timerfd_create __x64_sys_timerfd_create +284 common eventfd __x64_sys_eventfd +285 common fallocate __x64_sys_fallocate +286 common timerfd_settime __x64_sys_timerfd_settime +287 common timerfd_gettime __x64_sys_timerfd_gettime +288 common accept4 __x64_sys_accept4 +289 common signalfd4 __x64_sys_signalfd4 +290 common eventfd2 __x64_sys_eventfd2 +291 common epoll_create1 __x64_sys_epoll_create1 +292 common dup3 __x64_sys_dup3 +293 common pipe2 __x64_sys_pipe2 +294 common inotify_init1 __x64_sys_inotify_init1 +295 64 preadv __x64_sys_preadv +296 64 pwritev __x64_sys_pwritev +297 64 rt_tgsigqueueinfo __x64_sys_rt_tgsigqueueinfo +298 common perf_event_open __x64_sys_perf_event_open +299 64 recvmmsg __x64_sys_recvmmsg +300 common fanotify_init __x64_sys_fanotify_init +301 common fanotify_mark __x64_sys_fanotify_mark +302 common prlimit64 __x64_sys_prlimit64 +303 common name_to_handle_at __x64_sys_name_to_handle_at +304 common open_by_handle_at __x64_sys_open_by_handle_at +305 common clock_adjtime __x64_sys_clock_adjtime +306 common syncfs __x64_sys_syncfs +307 64 sendmmsg __x64_sys_sendmmsg +308 common setns __x64_sys_setns +309 common getcpu __x64_sys_getcpu +310 64 process_vm_readv __x64_sys_process_vm_readv +311 64 process_vm_writev __x64_sys_process_vm_writev +312 common kcmp __x64_sys_kcmp +313 common finit_module __x64_sys_finit_module +314 common sched_setattr __x64_sys_sched_setattr +315 common sched_getattr __x64_sys_sched_getattr +316 common renameat2 __x64_sys_renameat2 +317 common seccomp __x64_sys_seccomp +318 common getrandom __x64_sys_getrandom +319 common memfd_create __x64_sys_memfd_create +320 common kexec_file_load __x64_sys_kexec_file_load +321 common bpf __x64_sys_bpf +322 64 execveat __x64_sys_execveat/ptregs +323 common userfaultfd __x64_sys_userfaultfd +324 common membarrier __x64_sys_membarrier +325 common mlock2 __x64_sys_mlock2 +326 common copy_file_range __x64_sys_copy_file_range +327 64 preadv2 __x64_sys_preadv2 +328 64 pwritev2 __x64_sys_pwritev2 +329 common pkey_mprotect __x64_sys_pkey_mprotect +330 common pkey_alloc __x64_sys_pkey_alloc +331 common pkey_free __x64_sys_pkey_free +332 common statx __x64_sys_statx # # x32-specific system call numbers start at 512 to avoid cache impact -# for native 64-bit operation. +# for native 64-bit operation. 
The __x32_compat_sys stubs are created +# on-the-fly for compat_sys_*() compatibility system calls if X86_X32 +# is defined. # -512 x32 rt_sigaction compat_sys_rt_sigaction +512 x32 rt_sigaction __x32_compat_sys_rt_sigaction 513 x32 rt_sigreturn sys32_x32_rt_sigreturn -514 x32 ioctl compat_sys_ioctl -515 x32 readv compat_sys_readv -516 x32 writev compat_sys_writev -517 x32 recvfrom compat_sys_recvfrom -518 x32 sendmsg compat_sys_sendmsg -519 x32 recvmsg compat_sys_recvmsg -520 x32 execve compat_sys_execve/ptregs -521 x32 ptrace compat_sys_ptrace -522 x32 rt_sigpending compat_sys_rt_sigpending -523 x32 rt_sigtimedwait compat_sys_rt_sigtimedwait -524 x32 rt_sigqueueinfo compat_sys_rt_sigqueueinfo -525 x32 sigaltstack compat_sys_sigaltstack -526 x32 timer_create compat_sys_timer_create -527 x32 mq_notify compat_sys_mq_notify -528 x32 kexec_load compat_sys_kexec_load -529 x32 waitid compat_sys_waitid -530 x32 set_robust_list compat_sys_set_robust_list -531 x32 get_robust_list compat_sys_get_robust_list -532 x32 vmsplice compat_sys_vmsplice -533 x32 move_pages compat_sys_move_pages -534 x32 preadv compat_sys_preadv64 -535 x32 pwritev compat_sys_pwritev64 -536 x32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo -537 x32 recvmmsg compat_sys_recvmmsg -538 x32 sendmmsg compat_sys_sendmmsg -539 x32 process_vm_readv compat_sys_process_vm_readv -540 x32 process_vm_writev compat_sys_process_vm_writev -541 x32 setsockopt compat_sys_setsockopt -542 x32 getsockopt compat_sys_getsockopt -543 x32 io_setup compat_sys_io_setup -544 x32 io_submit compat_sys_io_submit -545 x32 execveat compat_sys_execveat/ptregs -546 x32 preadv2 compat_sys_preadv64v2 -547 x32 pwritev2 compat_sys_pwritev64v2 +514 x32 ioctl __x32_compat_sys_ioctl +515 x32 readv __x32_compat_sys_readv +516 x32 writev __x32_compat_sys_writev +517 x32 recvfrom __x32_compat_sys_recvfrom +518 x32 sendmsg __x32_compat_sys_sendmsg +519 x32 recvmsg __x32_compat_sys_recvmsg +520 x32 execve __x32_compat_sys_execve/ptregs +521 x32 ptrace __x32_compat_sys_ptrace +522 x32 rt_sigpending __x32_compat_sys_rt_sigpending +523 x32 rt_sigtimedwait __x32_compat_sys_rt_sigtimedwait +524 x32 rt_sigqueueinfo __x32_compat_sys_rt_sigqueueinfo +525 x32 sigaltstack __x32_compat_sys_sigaltstack +526 x32 timer_create __x32_compat_sys_timer_create +527 x32 mq_notify __x32_compat_sys_mq_notify +528 x32 kexec_load __x32_compat_sys_kexec_load +529 x32 waitid __x32_compat_sys_waitid +530 x32 set_robust_list __x32_compat_sys_set_robust_list +531 x32 get_robust_list __x32_compat_sys_get_robust_list +532 x32 vmsplice __x32_compat_sys_vmsplice +533 x32 move_pages __x32_compat_sys_move_pages +534 x32 preadv __x32_compat_sys_preadv64 +535 x32 pwritev __x32_compat_sys_pwritev64 +536 x32 rt_tgsigqueueinfo __x32_compat_sys_rt_tgsigqueueinfo +537 x32 recvmmsg __x32_compat_sys_recvmmsg +538 x32 sendmmsg __x32_compat_sys_sendmmsg +539 x32 process_vm_readv __x32_compat_sys_process_vm_readv +540 x32 process_vm_writev __x32_compat_sys_process_vm_writev +541 x32 setsockopt __x32_compat_sys_setsockopt +542 x32 getsockopt __x32_compat_sys_getsockopt +543 x32 io_setup __x32_compat_sys_io_setup +544 x32 io_submit __x32_compat_sys_io_submit +545 x32 execveat __x32_compat_sys_execveat/ptregs +546 x32 preadv2 __x32_compat_sys_preadv64v2 +547 x32 pwritev2 __x32_compat_sys_pwritev64v2 diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c index 944070e98a2c..63eb49082774 100644 --- a/tools/perf/bench/numa.c +++ b/tools/perf/bench/numa.c @@ -175,7 +175,7 @@ static const struct option options[] = { 
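The 64-bit entry points in the syscall table above move from sys_*() to __x64_sys_*(): under the pt_regs-based syscall wrapper convention these stubs take a single struct pt_regs pointer and decode their own arguments, and the /ptregs suffix still marks entries that need the full register frame. A minimal user-space sketch of that dispatch shape follows; the names (my_pt_regs, do_sys_write_impl) and the simplified register layout are illustrative assumptions, not the kernel's actual SYSCALL_DEFINE expansion.

/*
 * Sketch only: user-space mock of a pt_regs style syscall stub, in the
 * spirit of the __x64_sys_*() entries above.  The struct layout and the
 * helper names are assumptions for illustration.
 */
#include <stdio.h>
#include <stdint.h>

struct my_pt_regs {                     /* stand-in for struct pt_regs */
        uint64_t di, si, dx;            /* x86-64 carries syscall args 1-3 here */
};

/* the "real" implementation, taking already-decoded arguments */
static long do_sys_write_impl(unsigned int fd, const void *buf, size_t count)
{
        printf("write(fd=%u, buf=%p, count=%zu)\n", fd, (void *)buf, count);
        return (long)count;
}

/* the stub: one pt_regs argument, unpacked inside the wrapper itself */
static long my_x64_sys_write(const struct my_pt_regs *regs)
{
        return do_sys_write_impl((unsigned int)regs->di,
                                 (const void *)(uintptr_t)regs->si,
                                 (size_t)regs->dx);
}

int main(void)
{
        char msg[] = "hello";
        struct my_pt_regs regs = {
                .di = 1,
                .si = (uint64_t)(uintptr_t)msg,
                .dx = sizeof(msg) - 1,
        };

        return my_x64_sys_write(&regs) == (long)(sizeof(msg) - 1) ? 0 : 1;
}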
OPT_UINTEGER('s', "nr_secs" , &p0.nr_secs, "max number of seconds to run (default: 5 secs)"), OPT_UINTEGER('u', "usleep" , &p0.sleep_usecs, "usecs to sleep per loop iteration"), - OPT_BOOLEAN('R', "data_reads" , &p0.data_reads, "access the data via writes (can be mixed with -W)"), + OPT_BOOLEAN('R', "data_reads" , &p0.data_reads, "access the data via reads (can be mixed with -W)"), OPT_BOOLEAN('W', "data_writes" , &p0.data_writes, "access the data via writes (can be mixed with -R)"), OPT_BOOLEAN('B', "data_backwards", &p0.data_backwards, "access the data backwards as well"), OPT_BOOLEAN('Z', "data_zero_memset", &p0.data_zero_memset,"access the data via glibc bzero only"), diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c index 4aca13f23b9d..1c41b4eaf73c 100644 --- a/tools/perf/builtin-help.c +++ b/tools/perf/builtin-help.c @@ -439,7 +439,7 @@ int cmd_help(int argc, const char **argv) #ifdef HAVE_LIBELF_SUPPORT "probe", #endif -#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE) +#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT) "trace", #endif NULL }; diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c index 506564651cda..57393e94d156 100644 --- a/tools/perf/builtin-mem.c +++ b/tools/perf/builtin-mem.c @@ -83,7 +83,7 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem) }; argc = parse_options(argc, argv, options, record_mem_usage, - PARSE_OPT_STOP_AT_NON_OPTION); + PARSE_OPT_KEEP_UNKNOWN); rec_argc = argc + 9; /* max number of arguments */ rec_argv = calloc(rec_argc + 1, sizeof(char *)); @@ -436,7 +436,7 @@ int cmd_mem(int argc, const char **argv) } argc = parse_options_subcommand(argc, argv, mem_options, mem_subcommands, - mem_usage, PARSE_OPT_STOP_AT_NON_OPTION); + mem_usage, PARSE_OPT_KEEP_UNKNOWN); if (!argc || !(strncmp(argv[0], "rec", 3) || mem.operation)) usage_with_options(mem_usage, mem_options); diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 313c42423393..e0a9845b6cbc 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -657,8 +657,11 @@ static int perf_sample__fprintf_start(struct perf_sample *sample, break; case PERF_RECORD_SWITCH: case PERF_RECORD_SWITCH_CPU_WIDE: - if (has(SWITCH_OUT)) + if (has(SWITCH_OUT)) { ret += fprintf(fp, "S"); + if (sample->misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT) + ret += fprintf(fp, "p"); + } default: break; } @@ -2801,11 +2804,11 @@ int find_scripts(char **scripts_array, char **scripts_path_array) for_each_lang(scripts_path, scripts_dir, lang_dirent) { scnprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path, lang_dirent->d_name); -#ifdef NO_LIBPERL +#ifndef HAVE_LIBPERL_SUPPORT if (strstr(lang_path, "perl")) continue; #endif -#ifdef NO_LIBPYTHON +#ifndef HAVE_LIBPYTHON_SUPPORT if (strstr(lang_path, "python")) continue; #endif diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index f5c454855908..f17dc601b0f3 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -172,6 +172,7 @@ static bool interval_count; static const char *output_name; static int output_fd; static int print_free_counters_hint; +static int print_mixed_hw_group_error; struct perf_stat { bool record; @@ -1126,6 +1127,30 @@ static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg) fprintf(output, "%s%s", csv_sep, evsel->cgrp->name); } +static bool is_mixed_hw_group(struct perf_evsel *counter) +{ + struct perf_evlist *evlist = counter->evlist; + u32 pmu_type = counter->attr.type; + 
struct perf_evsel *pos; + + if (counter->nr_members < 2) + return false; + + evlist__for_each_entry(evlist, pos) { + /* software events can be part of any hardware group */ + if (pos->attr.type == PERF_TYPE_SOFTWARE) + continue; + if (pmu_type == PERF_TYPE_SOFTWARE) { + pmu_type = pos->attr.type; + continue; + } + if (pmu_type != pos->attr.type) + return true; + } + + return false; +} + static void printout(int id, int nr, struct perf_evsel *counter, double uval, char *prefix, u64 run, u64 ena, double noise, struct runtime_stat *st) @@ -1178,8 +1203,11 @@ static void printout(int id, int nr, struct perf_evsel *counter, double uval, counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED, csv_sep); - if (counter->supported) + if (counter->supported) { print_free_counters_hint = 1; + if (is_mixed_hw_group(counter)) + print_mixed_hw_group_error = 1; + } fprintf(stat_config.output, "%-*s%s", csv_output ? 0 : unit_width, @@ -1256,7 +1284,8 @@ static void uniquify_event_name(struct perf_evsel *counter) char *new_name; char *config; - if (!counter->pmu_name || !strncmp(counter->name, counter->pmu_name, + if (counter->uniquified_name || + !counter->pmu_name || !strncmp(counter->name, counter->pmu_name, strlen(counter->pmu_name))) return; @@ -1274,6 +1303,8 @@ static void uniquify_event_name(struct perf_evsel *counter) counter->name = new_name; } } + + counter->uniquified_name = true; } static void collect_all_aliases(struct perf_evsel *counter, @@ -1757,6 +1788,11 @@ static void print_footer(void) " echo 0 > /proc/sys/kernel/nmi_watchdog\n" " perf stat ...\n" " echo 1 > /proc/sys/kernel/nmi_watchdog\n"); + + if (print_mixed_hw_group_error) + fprintf(output, + "The events in group usually have to be from " + "the same PMU. Try reorganizing the group.\n"); } static void print_counters(struct timespec *ts, int argc, const char **argv) @@ -1943,7 +1979,8 @@ static const struct option stat_options[] = { OPT_STRING(0, "post", &post_cmd, "command", "command to run after to the measured command"), OPT_UINTEGER('I', "interval-print", &stat_config.interval, - "print counts at regular interval in ms (>= 10)"), + "print counts at regular interval in ms " + "(overhead is possible for values <= 100ms)"), OPT_INTEGER(0, "interval-count", &stat_config.times, "print counts for fixed number of times"), OPT_UINTEGER(0, "timeout", &stat_config.timeout, @@ -2923,17 +2960,6 @@ int cmd_stat(int argc, const char **argv) } } - if (interval && interval < 100) { - if (interval < 10) { - pr_err("print interval must be >= 10ms\n"); - parse_options_usage(stat_usage, stat_options, "I", 1); - goto out; - } else - pr_warning("print interval < 100ms. " - "The overhead percentage could be high in some cases. 
" - "Please proceed with caution.\n"); - } - if (stat_config.times && interval) interval_count = true; else if (stat_config.times && !interval) { diff --git a/tools/perf/builtin-version.c b/tools/perf/builtin-version.c index 2abe3910d6b6..50df168be326 100644 --- a/tools/perf/builtin-version.c +++ b/tools/perf/builtin-version.c @@ -60,7 +60,10 @@ static void library_status(void) STATUS(HAVE_DWARF_GETLOCATIONS_SUPPORT, dwarf_getlocations); STATUS(HAVE_GLIBC_SUPPORT, glibc); STATUS(HAVE_GTK2_SUPPORT, gtk2); +#ifndef HAVE_SYSCALL_TABLE_SUPPORT STATUS(HAVE_LIBAUDIT_SUPPORT, libaudit); +#endif + STATUS(HAVE_SYSCALL_TABLE_SUPPORT, syscall_table); STATUS(HAVE_LIBBFD_SUPPORT, libbfd); STATUS(HAVE_LIBELF_SUPPORT, libelf); STATUS(HAVE_LIBNUMA_SUPPORT, libnuma); diff --git a/tools/perf/perf.c b/tools/perf/perf.c index 1659029d03fc..20a08cb32332 100644 --- a/tools/perf/perf.c +++ b/tools/perf/perf.c @@ -73,7 +73,7 @@ static struct cmd_struct commands[] = { { "lock", cmd_lock, 0 }, { "kvm", cmd_kvm, 0 }, { "test", cmd_test, 0 }, -#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE) +#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT) { "trace", cmd_trace, 0 }, #endif { "inject", cmd_inject, 0 }, @@ -491,7 +491,7 @@ int main(int argc, const char **argv) argv[0] = cmd; } if (strstarts(cmd, "trace")) { -#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE) +#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT) setup_path(); argv[0] = "trace"; return cmd_trace(argc, argv); diff --git a/tools/perf/pmu-events/arch/s390/mapfile.csv b/tools/perf/pmu-events/arch/s390/mapfile.csv index ca7682748a4b..78bcf7f8e206 100644 --- a/tools/perf/pmu-events/arch/s390/mapfile.csv +++ b/tools/perf/pmu-events/arch/s390/mapfile.csv @@ -1,6 +1,6 @@ Family-model,Version,Filename,EventType -209[78],1,cf_z10,core -281[78],1,cf_z196,core -282[78],1,cf_zec12,core -296[45],1,cf_z13,core -3906,3,cf_z14,core +^IBM.209[78].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z10,core +^IBM.281[78].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z196,core +^IBM.282[78].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_zec12,core +^IBM.296[45].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z13,core +^IBM.390[67].*[13]\.[1-5].[[:xdigit:]]+$,3,cf_z14,core diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv index 93656f2fd53a..7e3cce3bcf3b 100644 --- a/tools/perf/pmu-events/arch/x86/mapfile.csv +++ b/tools/perf/pmu-events/arch/x86/mapfile.csv @@ -29,7 +29,6 @@ GenuineIntel-6-4D,v13,silvermont,core GenuineIntel-6-4C,v13,silvermont,core GenuineIntel-6-2A,v15,sandybridge,core GenuineIntel-6-2C,v2,westmereep-dp,core -GenuineIntel-6-2C,v2,westmereep-dp,core GenuineIntel-6-25,v2,westmereep-sp,core GenuineIntel-6-2F,v2,westmereex,core GenuineIntel-6-55,v1,skylakex,core diff --git a/tools/perf/tests/attr/test-record-group-sampling b/tools/perf/tests/attr/test-record-group-sampling index f906b793196f..8a33ca4f9e1f 100644 --- a/tools/perf/tests/attr/test-record-group-sampling +++ b/tools/perf/tests/attr/test-record-group-sampling @@ -35,3 +35,6 @@ inherit=0 # sampling disabled sample_freq=0 sample_period=0 +freq=0 +write_backward=0 +sample_id_all=0 diff --git a/tools/perf/tests/bpf-script-example.c b/tools/perf/tests/bpf-script-example.c index e4123c1b0e88..1ca5106df5f1 100644 --- a/tools/perf/tests/bpf-script-example.c +++ b/tools/perf/tests/bpf-script-example.c @@ -31,7 +31,7 @@ struct bpf_map_def SEC("maps") flip_table = { .max_entries = 1, }; -SEC("func=SyS_epoll_pwait") +SEC("func=do_epoll_wait") 
int bpf_func__SyS_epoll_pwait(void *ctx) { int ind =0; diff --git a/tools/perf/tests/bpf-script-test-kbuild.c b/tools/perf/tests/bpf-script-test-kbuild.c index 3626924740d8..ff3ec8337f0a 100644 --- a/tools/perf/tests/bpf-script-test-kbuild.c +++ b/tools/perf/tests/bpf-script-test-kbuild.c @@ -9,7 +9,6 @@ #define SEC(NAME) __attribute__((section(NAME), used)) #include <uapi/linux/fs.h> -#include <uapi/asm/ptrace.h> SEC("func=vfs_llseek") int bpf_func__vfs_llseek(void *ctx) diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index 625f5a6772af..cac8f8889bc3 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -118,6 +118,7 @@ static struct test generic_tests[] = { { .desc = "Breakpoint accounting", .func = test__bp_accounting, + .is_supported = test__bp_signal_is_supported, }, { .desc = "Number of exit events of a simple workload", diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c index bb8e6bcb0d96..0919b0793e5b 100644 --- a/tools/perf/tests/mmap-basic.c +++ b/tools/perf/tests/mmap-basic.c @@ -75,7 +75,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]); evsels[i] = perf_evsel__newtp("syscalls", name); if (IS_ERR(evsels[i])) { - pr_debug("perf_evsel__new\n"); + pr_debug("perf_evsel__new(%s)\n", name); goto out_delete_evlist; } diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh index 1ecc1f0ff84a..016882dbbc16 100755 --- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh +++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh @@ -19,12 +19,10 @@ trace_libc_inet_pton_backtrace() { expected[1]=".*inet_pton[[:space:]]\($libc\)$" case "$(uname -m)" in s390x) - eventattr='call-graph=dwarf' + eventattr='call-graph=dwarf,max-stack=4' expected[2]="gaih_inet.*[[:space:]]\($libc|inlined\)$" - expected[3]="__GI_getaddrinfo[[:space:]]\($libc|inlined\)$" + expected[3]="(__GI_)?getaddrinfo[[:space:]]\($libc|inlined\)$" expected[4]="main[[:space:]]\(.*/bin/ping.*\)$" - expected[5]="__libc_start_main[[:space:]]\($libc\)$" - expected[6]="_start[[:space:]]\(.*/bin/ping.*\)$" ;; *) eventattr='max-stack=3' diff --git a/tools/perf/trace/beauty/mmap.c b/tools/perf/trace/beauty/mmap.c index 417e3ecfe9d7..9f68077b241b 100644 --- a/tools/perf/trace/beauty/mmap.c +++ b/tools/perf/trace/beauty/mmap.c @@ -54,6 +54,9 @@ static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size, P_MMAP_FLAG(EXECUTABLE); P_MMAP_FLAG(FILE); P_MMAP_FLAG(FIXED); +#ifdef MAP_FIXED_NOREPLACE + P_MMAP_FLAG(FIXED_NOREPLACE); +#endif P_MMAP_FLAG(GROWSDOWN); P_MMAP_FLAG(HUGETLB); P_MMAP_FLAG(LOCKED); diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index 12c099a87f8b..3781d74088a7 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c @@ -692,6 +692,7 @@ static int annotate_browser__run(struct annotate_browser *browser, "J Toggle showing number of jump sources on targets\n" "n Search next string\n" "o Toggle disassembler output/simplified view\n" + "O Bump offset level (jump targets -> +call -> all -> cycle thru)\n" "s Toggle source code view\n" "t Circulate percent, total period, samples view\n" "/ Search string\n" @@ -719,6 +720,10 @@ static int annotate_browser__run(struct annotate_browser *browser, notes->options->use_offset = !notes->options->use_offset; annotation__update_column_widths(notes); continue; + 
case 'O': + if (++notes->options->offset_level > ANNOTATION__MAX_OFFSET_LEVEL) + notes->options->offset_level = ANNOTATION__MIN_OFFSET_LEVEL; + continue; case 'j': notes->options->jump_arrows = !notes->options->jump_arrows; continue; diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index 0eec06c105c6..e5f247247daa 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -2714,7 +2714,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, "h/?/F1 Show this window\n" \ "UP/DOWN/PGUP\n" \ "PGDN/SPACE Navigate\n" \ - "q/ESC/CTRL+C Exit browser\n\n" \ + "q/ESC/CTRL+C Exit browser or go back to previous screen\n\n" \ "For multiple event sessions:\n\n" \ "TAB/UNTAB Switch events\n\n" \ "For symbolic views (--sort has sym):\n\n" \ diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index fbad8dfbb186..536ee148bff8 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -46,6 +46,7 @@ struct annotation_options annotation__default_options = { .use_offset = true, .jump_arrows = true, + .offset_level = ANNOTATION__OFFSET_JUMP_TARGETS, }; const char *disassembler_style; @@ -2512,7 +2513,8 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati if (!notes->options->use_offset) { printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr); } else { - if (al->jump_sources) { + if (al->jump_sources && + notes->options->offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) { if (notes->options->show_nr_jumps) { int prev; printed = scnprintf(bf, sizeof(bf), "%*d ", @@ -2523,9 +2525,14 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati obj__printf(obj, bf); obj__set_color(obj, prev); } - +print_addr: printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ", notes->widths.target, addr); + } else if (ins__is_call(&disasm_line(al)->ins) && + notes->options->offset_level >= ANNOTATION__OFFSET_CALL) { + goto print_addr; + } else if (notes->options->offset_level == ANNOTATION__MAX_OFFSET_LEVEL) { + goto print_addr; } else { printed = scnprintf(bf, sizeof(bf), "%-*s ", notes->widths.addr, " "); @@ -2642,10 +2649,11 @@ int __annotation__scnprintf_samples_period(struct annotation *notes, */ static struct annotation_config { const char *name; - bool *value; + void *value; } annotation__configs[] = { ANNOTATION__CFG(hide_src_code), ANNOTATION__CFG(jump_arrows), + ANNOTATION__CFG(offset_level), ANNOTATION__CFG(show_linenr), ANNOTATION__CFG(show_nr_jumps), ANNOTATION__CFG(show_nr_samples), @@ -2677,8 +2685,16 @@ static int annotation__config(const char *var, const char *value, if (cfg == NULL) pr_debug("%s variable unknown, ignoring...", var); - else - *cfg->value = perf_config_bool(name, value); + else if (strcmp(var, "annotate.offset_level") == 0) { + perf_config_int(cfg->value, name, value); + + if (*(int *)cfg->value > ANNOTATION__MAX_OFFSET_LEVEL) + *(int *)cfg->value = ANNOTATION__MAX_OFFSET_LEVEL; + else if (*(int *)cfg->value < ANNOTATION__MIN_OFFSET_LEVEL) + *(int *)cfg->value = ANNOTATION__MIN_OFFSET_LEVEL; + } else { + *(bool *)cfg->value = perf_config_bool(name, value); + } return 0; } diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index db8d09bea07e..f28a9e43421d 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -70,8 +70,17 @@ struct annotation_options { show_nr_jumps, show_nr_samples, show_total_period; + u8 offset_level; }; +enum { + ANNOTATION__OFFSET_JUMP_TARGETS = 1, + 
ANNOTATION__OFFSET_CALL, + ANNOTATION__MAX_OFFSET_LEVEL, +}; + +#define ANNOTATION__MIN_OFFSET_LEVEL ANNOTATION__OFFSET_JUMP_TARGETS + extern struct annotation_options annotation__default_options; struct annotation; diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c index 640af88331b4..c8b98fa22997 100644 --- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c +++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c @@ -1,6 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * SPDX-License-Identifier: GPL-2.0 - * * Copyright(C) 2015-2018 Linaro Limited. * * Author: Tor Jeremiassen <tor@ti.com> diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c index 1b0d422373be..40020b1ca54f 100644 --- a/tools/perf/util/cs-etm.c +++ b/tools/perf/util/cs-etm.c @@ -1,6 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * SPDX-License-Identifier: GPL-2.0 - * * Copyright(C) 2015-2018 Linaro Limited. * * Author: Tor Jeremiassen <tor@ti.com> diff --git a/tools/perf/util/cs-etm.h b/tools/perf/util/cs-etm.h index 5864d5dca616..37f8d48179ca 100644 --- a/tools/perf/util/cs-etm.h +++ b/tools/perf/util/cs-etm.h @@ -1,18 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright(C) 2015 Linaro Limited. All rights reserved. * Author: Mathieu Poirier <mathieu.poirier@linaro.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. */ #ifndef INCLUDE__UTIL_PERF_CS_ETM_H__ diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index f0a6cbd033cc..98ff3a6a3d50 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -1421,7 +1421,9 @@ size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp) size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp) { bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT; - const char *in_out = out ? "OUT" : "IN "; + const char *in_out = !out ? "IN " : + !(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT) ? + "OUT " : "OUT preempt"; if (event->header.type == PERF_RECORD_SWITCH) return fprintf(fp, " %s\n", in_out); diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 1ac8d9236efd..4cd2cf93f726 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -930,8 +930,11 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts, * than leader in case leader 'leads' the sampling. 
*/ if ((leader != evsel) && leader->sample_read) { - attr->sample_freq = 0; - attr->sample_period = 0; + attr->freq = 0; + attr->sample_freq = 0; + attr->sample_period = 0; + attr->write_backward = 0; + attr->sample_id_all = 0; } if (opts->no_samples) @@ -1922,7 +1925,8 @@ try_fallback: goto fallback_missing_features; } else if (!perf_missing_features.group_read && evsel->attr.inherit && - (evsel->attr.read_format & PERF_FORMAT_GROUP)) { + (evsel->attr.read_format & PERF_FORMAT_GROUP) && + perf_evsel__is_group_leader(evsel)) { perf_missing_features.group_read = true; pr_debug2("switching off group read\n"); goto fallback_missing_features; @@ -2754,8 +2758,14 @@ bool perf_evsel__fallback(struct perf_evsel *evsel, int err, (paranoid = perf_event_paranoid()) > 1) { const char *name = perf_evsel__name(evsel); char *new_name; + const char *sep = ":"; - if (asprintf(&new_name, "%s%su", name, strchr(name, ':') ? "" : ":") < 0) + /* Is there already the separator in the name. */ + if (strchr(name, '/') || + strchr(name, ':')) + sep = ""; + + if (asprintf(&new_name, "%s%su", name, sep) < 0) return false; if (evsel->name) @@ -2870,8 +2880,7 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target, #if defined(__i386__) || defined(__x86_64__) if (evsel->attr.type == PERF_TYPE_HARDWARE) return scnprintf(msg, size, "%s", - "No hardware sampling interrupt available.\n" - "No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it."); + "No hardware sampling interrupt available.\n"); #endif break; case EBUSY: @@ -2894,8 +2903,7 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target, return scnprintf(msg, size, "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n" - "/bin/dmesg may provide additional information.\n" - "No CONFIG_PERF_EVENTS=y kernel support configured?", + "/bin/dmesg | grep -i perf may provide additional information.\n", err, str_error_r(err, sbuf, sizeof(sbuf)), perf_evsel__name(evsel)); } diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index d3ee3af618ef..92ec009a292d 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -115,6 +115,7 @@ struct perf_evsel { unsigned int sample_size; int id_pos; int is_pos; + bool uniquified_name; bool snapshot; bool supported; bool needs_swap; diff --git a/tools/perf/util/generate-cmdlist.sh b/tools/perf/util/generate-cmdlist.sh index ff17920a5ebc..c3cef36d4176 100755 --- a/tools/perf/util/generate-cmdlist.sh +++ b/tools/perf/util/generate-cmdlist.sh @@ -38,7 +38,7 @@ do done echo "#endif /* HAVE_LIBELF_SUPPORT */" -echo "#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE)" +echo "#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT)" sed -n -e 's/^perf-\([^ ]*\)[ ].* audit*/\1/p' command-list.txt | sort | while read cmd diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 121df1683c36..a8bff2178fbc 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -1320,7 +1320,8 @@ static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp) dir = opendir(path); if (!dir) { - pr_warning("failed: can't open node sysfs data\n"); + pr_debug2("%s: could't read %s, does this arch have topology information?\n", + __func__, path); return -1; } diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 2eca8478e24f..32d50492505d 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -1019,13 +1019,6 @@ int 
machine__load_vmlinux_path(struct machine *machine, enum map_type type) return ret; } -static void map_groups__fixup_end(struct map_groups *mg) -{ - int i; - for (i = 0; i < MAP__NR_TYPES; ++i) - __map_groups__fixup_end(mg, i); -} - static char *get_kernel_version(const char *root_dir) { char version[PATH_MAX]; @@ -1233,6 +1226,7 @@ int machine__create_kernel_maps(struct machine *machine) { struct dso *kernel = machine__get_kernel(machine); const char *name = NULL; + struct map *map; u64 addr = 0; int ret; @@ -1259,13 +1253,25 @@ int machine__create_kernel_maps(struct machine *machine) machine__destroy_kernel_maps(machine); return -1; } - machine__set_kernel_mmap(machine, addr, 0); + + /* we have a real start address now, so re-order the kmaps */ + map = machine__kernel_map(machine); + + map__get(map); + map_groups__remove(&machine->kmaps, map); + + /* assume it's the last in the kmaps */ + machine__set_kernel_mmap(machine, addr, ~0ULL); + + map_groups__insert(&machine->kmaps, map); + map__put(map); } - /* - * Now that we have all the maps created, just set the ->end of them: - */ - map_groups__fixup_end(&machine->kmaps); + /* update end address of the kernel map using adjacent module address */ + map = map__next(machine__kernel_map(machine)); + if (map) + machine__set_kernel_mmap(machine, addr, map->start); + return 0; } diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 064bdcb7bd78..d2fb597c9a8c 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -539,9 +539,10 @@ static bool pmu_is_uncore(const char *name) /* * PMU CORE devices have different name other than cpu in sysfs on some - * platforms. looking for possible sysfs files to identify as core device. + * platforms. + * Looking for possible sysfs files to identify the arm core device. */ -static int is_pmu_core(const char *name) +static int is_arm_pmu_core(const char *name) { struct stat st; char path[PATH_MAX]; @@ -550,18 +551,18 @@ static int is_pmu_core(const char *name) if (!sysfs) return 0; - /* Look for cpu sysfs (x86 and others) */ - scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/cpu", sysfs); - if ((stat(path, &st) == 0) && - (strncmp(name, "cpu", strlen("cpu")) == 0)) - return 1; - /* Look for cpu sysfs (specific to arm) */ scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s/cpus", sysfs, name); if (stat(path, &st) == 0) return 1; + /* Look for cpu sysfs (specific to s390) */ + scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s", + sysfs, name); + if (stat(path, &st) == 0 && !strncmp(name, "cpum_", 5)) + return 1; + return 0; } @@ -580,7 +581,7 @@ char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused) * cpuid string generated on this platform. * Otherwise return non-zero. */ -int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid) +int strcmp_cpuid_str(const char *mapcpuid, const char *cpuid) { regex_t re; regmatch_t pmatch[1]; @@ -662,6 +663,7 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu) struct pmu_events_map *map; struct pmu_event *pe; const char *name = pmu->name; + const char *pname; map = perf_pmu__find_map(pmu); if (!map) @@ -680,11 +682,9 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu) break; } - if (!is_pmu_core(name)) { - /* check for uncore devices */ - if (pe->pmu == NULL) - continue; - if (strncmp(pe->pmu, name, strlen(pe->pmu))) + if (!is_arm_pmu_core(name)) { + pname = pe->pmu ? 
pe->pmu : "cpu"; + if (strncmp(pname, name, strlen(pname))) continue; } diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 62b2dd2253eb..1466814ebada 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -2091,16 +2091,14 @@ static bool symbol__read_kptr_restrict(void) int symbol__annotation_init(void) { + if (symbol_conf.init_annotation) + return 0; + if (symbol_conf.initialized) { pr_err("Annotation needs to be init before symbol__init()\n"); return -1; } - if (symbol_conf.init_annotation) { - pr_warning("Annotation being initialized multiple times\n"); - return 0; - } - symbol_conf.priv_size += sizeof(struct annotation); symbol_conf.init_annotation = true; return 0; diff --git a/tools/perf/util/syscalltbl.c b/tools/perf/util/syscalltbl.c index 895122d638dd..0ee7f568d60c 100644 --- a/tools/perf/util/syscalltbl.c +++ b/tools/perf/util/syscalltbl.c @@ -17,7 +17,7 @@ #include <stdlib.h> #include <linux/compiler.h> -#ifdef HAVE_SYSCALL_TABLE +#ifdef HAVE_SYSCALL_TABLE_SUPPORT #include <string.h> #include "string2.h" #include "util.h" @@ -139,7 +139,7 @@ int syscalltbl__strglobmatch_first(struct syscalltbl *tbl, const char *syscall_g return syscalltbl__strglobmatch_next(tbl, syscall_glob, idx); } -#else /* HAVE_SYSCALL_TABLE */ +#else /* HAVE_SYSCALL_TABLE_SUPPORT */ #include <libaudit.h> @@ -176,4 +176,4 @@ int syscalltbl__strglobmatch_first(struct syscalltbl *tbl, const char *syscall_g { return syscalltbl__strglobmatch_next(tbl, syscall_glob, idx); } -#endif /* HAVE_SYSCALL_TABLE */ +#endif /* HAVE_SYSCALL_TABLE_SUPPORT */ diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c index 0ac9077f62a2..b1e5c3a2b8e3 100644 --- a/tools/perf/util/trace-event-scripting.c +++ b/tools/perf/util/trace-event-scripting.c @@ -98,7 +98,7 @@ static void register_python_scripting(struct scripting_ops *scripting_ops) } } -#ifdef NO_LIBPYTHON +#ifndef HAVE_LIBPYTHON_SUPPORT void setup_python_scripting(void) { register_python_scripting(&python_scripting_unsupported_ops); @@ -161,7 +161,7 @@ static void register_perl_scripting(struct scripting_ops *scripting_ops) } } -#ifdef NO_LIBPERL +#ifndef HAVE_LIBPERL_SUPPORT void setup_perl_scripting(void) { register_perl_scripting(&perl_scripting_unsupported_ops); diff --git a/tools/power/acpi/Makefile.config b/tools/power/acpi/Makefile.config index 2cccbba64418..f304be71c278 100644 --- a/tools/power/acpi/Makefile.config +++ b/tools/power/acpi/Makefile.config @@ -56,6 +56,7 @@ INSTALL_SCRIPT = ${INSTALL_PROGRAM} # to compile vs uClibc, that can be done here as well. 
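The mapfile.csv hunks earlier in this series replace bare model numbers with full extended regular expressions in the s390 Family-model column, and the pmu.c change above drops the __weak marker from strcmp_cpuid_str(), the routine that matches such a pattern against the generated cpuid string with regcomp()/regexec(). A standalone sketch of that matching follows; the sample cpuid string is a made-up placeholder, not output from a real machine.

/*
 * Sketch only: an extended-regex cpuid match in the style of
 * strcmp_cpuid_str(), using a pattern from the s390 mapfile.csv hunk
 * (cf_z14 row).  The cpuid string below is hypothetical.
 */
#include <regex.h>
#include <stdio.h>

int main(void)
{
        const char *mapcpuid = "^IBM.390[67].*[13]\\.[1-5].[[:xdigit:]]+$";
        const char *cpuid    = "IBM,3906,704,M03,3.5,002f"; /* hypothetical */
        regex_t re;
        int rc;

        if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
                fprintf(stderr, "invalid pattern: %s\n", mapcpuid);
                return 2;
        }

        rc = regexec(&re, cpuid, 0, NULL, 0);   /* 0 means the string matched */
        regfree(&re);

        printf("%s %s %s\n", cpuid,
               rc == 0 ? "matches" : "does not match", mapcpuid);
        return rc == 0 ? 0 : 1;
}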
CROSS = #/usr/i386-linux-uclibc/usr/bin/i386-uclibc- CROSS_COMPILE ?= $(CROSS) +LD = $(CC) HOSTCC = gcc # check if compiler option is supported diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c index cb166be4918d..4ea385be528f 100644 --- a/tools/testing/nvdimm/test/nfit.c +++ b/tools/testing/nvdimm/test/nfit.c @@ -138,6 +138,7 @@ static u32 handle[] = { }; static unsigned long dimm_fail_cmd_flags[NUM_DCR]; +static int dimm_fail_cmd_code[NUM_DCR]; struct nfit_test_fw { enum intel_fw_update_state state; @@ -892,8 +893,11 @@ static int get_dimm(struct nfit_mem *nfit_mem, unsigned int func) if (i >= ARRAY_SIZE(handle)) return -ENXIO; - if ((1 << func) & dimm_fail_cmd_flags[i]) + if ((1 << func) & dimm_fail_cmd_flags[i]) { + if (dimm_fail_cmd_code[i]) + return dimm_fail_cmd_code[i]; return -EIO; + } return i; } @@ -1162,12 +1166,12 @@ static int ars_state_init(struct device *dev, struct ars_state *ars_state) static void put_dimms(void *data) { - struct device **dimm_dev = data; + struct nfit_test *t = data; int i; - for (i = 0; i < NUM_DCR; i++) - if (dimm_dev[i]) - device_unregister(dimm_dev[i]); + for (i = 0; i < t->num_dcr; i++) + if (t->dimm_dev[i]) + device_unregister(t->dimm_dev[i]); } static struct class *nfit_test_dimm; @@ -1176,13 +1180,11 @@ static int dimm_name_to_id(struct device *dev) { int dimm; - if (sscanf(dev_name(dev), "test_dimm%d", &dimm) != 1 - || dimm >= NUM_DCR || dimm < 0) + if (sscanf(dev_name(dev), "test_dimm%d", &dimm) != 1) return -ENXIO; return dimm; } - static ssize_t handle_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1191,7 +1193,7 @@ static ssize_t handle_show(struct device *dev, struct device_attribute *attr, if (dimm < 0) return dimm; - return sprintf(buf, "%#x", handle[dimm]); + return sprintf(buf, "%#x\n", handle[dimm]); } DEVICE_ATTR_RO(handle); @@ -1225,8 +1227,39 @@ static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RW(fail_cmd); +static ssize_t fail_cmd_code_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int dimm = dimm_name_to_id(dev); + + if (dimm < 0) + return dimm; + + return sprintf(buf, "%d\n", dimm_fail_cmd_code[dimm]); +} + +static ssize_t fail_cmd_code_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t size) +{ + int dimm = dimm_name_to_id(dev); + unsigned long val; + ssize_t rc; + + if (dimm < 0) + return dimm; + + rc = kstrtol(buf, 0, &val); + if (rc) + return rc; + + dimm_fail_cmd_code[dimm] = val; + return size; +} +static DEVICE_ATTR_RW(fail_cmd_code); + static struct attribute *nfit_test_dimm_attributes[] = { &dev_attr_fail_cmd.attr, + &dev_attr_fail_cmd_code.attr, &dev_attr_handle.attr, NULL, }; @@ -1240,6 +1273,23 @@ static const struct attribute_group *nfit_test_dimm_attribute_groups[] = { NULL, }; +static int nfit_test_dimm_init(struct nfit_test *t) +{ + int i; + + if (devm_add_action_or_reset(&t->pdev.dev, put_dimms, t)) + return -ENOMEM; + for (i = 0; i < t->num_dcr; i++) { + t->dimm_dev[i] = device_create_with_groups(nfit_test_dimm, + &t->pdev.dev, 0, NULL, + nfit_test_dimm_attribute_groups, + "test_dimm%d", i + t->dcr_idx); + if (!t->dimm_dev[i]) + return -ENOMEM; + } + return 0; +} + static void smart_init(struct nfit_test *t) { int i; @@ -1335,17 +1385,8 @@ static int nfit_test0_alloc(struct nfit_test *t) if (!t->_fit) return -ENOMEM; - if (devm_add_action_or_reset(&t->pdev.dev, put_dimms, t->dimm_dev)) + if (nfit_test_dimm_init(t)) return -ENOMEM; - for (i = 0; i 
< NUM_DCR; i++) { - t->dimm_dev[i] = device_create_with_groups(nfit_test_dimm, - &t->pdev.dev, 0, NULL, - nfit_test_dimm_attribute_groups, - "test_dimm%d", i); - if (!t->dimm_dev[i]) - return -ENOMEM; - } - smart_init(t); return ars_state_init(&t->pdev.dev, &t->ars_state); } @@ -1377,6 +1418,8 @@ static int nfit_test1_alloc(struct nfit_test *t) if (!t->spa_set[1]) return -ENOMEM; + if (nfit_test_dimm_init(t)) + return -ENOMEM; smart_init(t); return ars_state_init(&t->pdev.dev, &t->ars_state); } @@ -2222,6 +2265,9 @@ static void nfit_test1_setup(struct nfit_test *t) set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en); set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en); set_bit(ND_INTEL_ENABLE_LSS_STATUS, &acpi_desc->dimm_cmd_force_en); + set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en); + set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en); + set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en); } static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa, diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore index 9cf83f895d98..5e1ab2f0eb79 100644 --- a/tools/testing/selftests/bpf/.gitignore +++ b/tools/testing/selftests/bpf/.gitignore @@ -12,3 +12,6 @@ test_tcpbpf_user test_verifier_log feature test_libbpf_open +test_sock +test_sock_addr +urandom_read diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index faadbe233966..4123d0ab90ba 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -1108,7 +1108,7 @@ static void test_stacktrace_build_id(void) assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null") == 0); - assert(system("./urandom_read if=/dev/urandom of=/dev/zero count=4 2> /dev/null") == 0); + assert(system("./urandom_read") == 0); /* disable stack trace collection */ key = 0; val = 1; @@ -1158,7 +1158,7 @@ static void test_stacktrace_build_id(void) } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); CHECK(build_id_matches < 1, "build id match", - "Didn't find expected build ID from the map"); + "Didn't find expected build ID from the map\n"); disable_pmu: ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c index 73bb20cfb9b7..f4d99fabc56d 100644 --- a/tools/testing/selftests/bpf/test_sock.c +++ b/tools/testing/selftests/bpf/test_sock.c @@ -13,6 +13,7 @@ #include <bpf/bpf.h> #include "cgroup_helpers.h" +#include "bpf_rlimit.h" #ifndef ARRAY_SIZE # define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c index d488f20926e8..2950f80ba7fb 100644 --- a/tools/testing/selftests/bpf/test_sock_addr.c +++ b/tools/testing/selftests/bpf/test_sock_addr.c @@ -15,6 +15,7 @@ #include <bpf/libbpf.h> #include "cgroup_helpers.h" +#include "bpf_rlimit.h" #define CG_PATH "/foo" #define CONNECT4_PROG_PATH "./connect4_prog.o" diff --git a/tools/testing/selftests/bpf/test_sock_addr.sh b/tools/testing/selftests/bpf/test_sock_addr.sh index c6e1dcf992c4..9832a875a828 100755 --- a/tools/testing/selftests/bpf/test_sock_addr.sh +++ b/tools/testing/selftests/bpf/test_sock_addr.sh @@ -4,7 +4,7 @@ set -eu ping_once() { - ping -q -c 1 -W 1 ${1%%/*} >/dev/null 2>&1 + ping -${1} -q -c 1 -W 1 ${2%%/*} >/dev/null 2>&1 } wait_for_ip() @@ -13,7 +13,7 @@ wait_for_ip() echo -n "Wait for testing IPv4/IPv6 to become 
available " for _i in $(seq ${MAX_PING_TRIES}); do echo -n "." - if ping_once ${TEST_IPv4} && ping_once ${TEST_IPv6}; then + if ping_once 4 ${TEST_IPv4} && ping_once 6 ${TEST_IPv6}; then echo " OK" return fi diff --git a/tools/testing/selftests/filesystems/Makefile b/tools/testing/selftests/filesystems/Makefile index 4e6d09fb166f..5c7d7001ad37 100644 --- a/tools/testing/selftests/filesystems/Makefile +++ b/tools/testing/selftests/filesystems/Makefile @@ -1,8 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 -TEST_PROGS := dnotify_test devpts_pts -all: $(TEST_PROGS) -include ../lib.mk +TEST_GEN_PROGS := devpts_pts +TEST_GEN_PROGS_EXTENDED := dnotify_test -clean: - rm -fr $(TEST_PROGS) +include ../lib.mk diff --git a/tools/testing/selftests/firmware/Makefile b/tools/testing/selftests/firmware/Makefile index 826f38d5dd19..261c81f08606 100644 --- a/tools/testing/selftests/firmware/Makefile +++ b/tools/testing/selftests/firmware/Makefile @@ -4,6 +4,7 @@ all: TEST_PROGS := fw_run_tests.sh +TEST_FILES := fw_fallback.sh fw_filesystem.sh fw_lib.sh include ../lib.mk diff --git a/tools/testing/selftests/firmware/fw_lib.sh b/tools/testing/selftests/firmware/fw_lib.sh index 9ea31b57d71a..962d7f4ac627 100755 --- a/tools/testing/selftests/firmware/fw_lib.sh +++ b/tools/testing/selftests/firmware/fw_lib.sh @@ -154,11 +154,13 @@ test_finish() if [ "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then echo "$OLD_TIMEOUT" >/sys/class/firmware/timeout fi - if [ "$OLD_FWPATH" = "" ]; then - OLD_FWPATH=" " - fi if [ "$TEST_REQS_FW_SET_CUSTOM_PATH" = "yes" ]; then - echo -n "$OLD_FWPATH" >/sys/module/firmware_class/parameters/path + if [ "$OLD_FWPATH" = "" ]; then + # A zero-length write won't work; write a null byte + printf '\000' >/sys/module/firmware_class/parameters/path + else + echo -n "$OLD_FWPATH" >/sys/module/firmware_class/parameters/path + fi fi if [ -f $FW ]; then rm -f "$FW" diff --git a/tools/testing/selftests/firmware/fw_run_tests.sh b/tools/testing/selftests/firmware/fw_run_tests.sh index 06d638e9dc62..cffdd4eb0a57 100755 --- a/tools/testing/selftests/firmware/fw_run_tests.sh +++ b/tools/testing/selftests/firmware/fw_run_tests.sh @@ -66,5 +66,5 @@ if [ -f $FW_FORCE_SYSFS_FALLBACK ]; then run_test_config_0003 else echo "Running basic kernel configuration, working with your config" - run_test + run_tests fi diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc index 786dce7e48be..2aabab363cfb 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc @@ -29,7 +29,7 @@ do_reset echo "Test extended error support" echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="ping"' > events/sched/sched_wakeup/trigger -echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="ping"' >> events/sched/sched_wakeup/trigger &>/dev/null +! echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="ping"' >> events/sched/sched_wakeup/trigger 2> /dev/null if ! 
grep -q "ERROR:" events/sched/sched_wakeup/hist; then fail "Failed to generate extended error in histogram" fi diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc new file mode 100644 index 000000000000..c193dce611a2 --- /dev/null +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc @@ -0,0 +1,44 @@ +#!/bin/sh +# description: event trigger - test multiple actions on hist trigger + + +do_reset() { + reset_trigger + echo > set_event + clear_trace +} + +fail() { #msg + do_reset + echo $1 + exit_fail +} + +if [ ! -f set_event ]; then + echo "event tracing is not supported" + exit_unsupported +fi + +if [ ! -f synthetic_events ]; then + echo "synthetic event is not supported" + exit_unsupported +fi + +clear_synthetic_events +reset_tracer +do_reset + +echo "Test multiple actions on hist trigger" +echo 'wakeup_latency u64 lat; pid_t pid' >> synthetic_events +TRIGGER1=events/sched/sched_wakeup/trigger +TRIGGER2=events/sched/sched_switch/trigger + +echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="cyclictest"' > $TRIGGER1 +echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0 if next_comm=="cyclictest"' >> $TRIGGER2 +echo 'hist:keys=next_pid:onmatch(sched.sched_wakeup).wakeup_latency(sched.sched_switch.$wakeup_lat,next_pid) if next_comm=="cyclictest"' >> $TRIGGER2 +echo 'hist:keys=next_pid:onmatch(sched.sched_wakeup).wakeup_latency(sched.sched_switch.$wakeup_lat,prev_pid) if next_comm=="cyclictest"' >> $TRIGGER2 +echo 'hist:keys=next_pid if next_comm=="cyclictest"' >> $TRIGGER2 + +do_reset + +exit 0 diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index dc44de904797..2ddcc96ae456 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -4,17 +4,18 @@ top_srcdir = ../../../../ UNAME_M := $(shell uname -m) LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c -LIBKVM_x86_64 = lib/x86.c +LIBKVM_x86_64 = lib/x86.c lib/vmx.c TEST_GEN_PROGS_x86_64 = set_sregs_test TEST_GEN_PROGS_x86_64 += sync_regs_test +TEST_GEN_PROGS_x86_64 += vmx_tsc_adjust_test TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M)) LIBKVM += $(LIBKVM_$(UNAME_M)) INSTALL_HDR_PATH = $(top_srcdir)/usr LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/ -CFLAGS += -O2 -g -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) +CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) # After inclusion, $(OUTPUT) is defined and # $(TEST_GEN_PROGS) starts with $(OUTPUT)/ diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index 57974ad46373..637b7017b6ee 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -112,24 +112,27 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, uint32_t memslot); -void kvm_get_supported_cpuid(struct kvm_cpuid2 *cpuid); +struct kvm_cpuid2 *kvm_get_supported_cpuid(void); void vcpu_set_cpuid( struct kvm_vm *vm, uint32_t vcpuid, struct kvm_cpuid2 *cpuid); -struct kvm_cpuid2 *allocate_kvm_cpuid2(void); struct kvm_cpuid_entry2 * -find_cpuid_index_entry(struct kvm_cpuid2 *cpuid, uint32_t function, - uint32_t index); +kvm_get_supported_cpuid_index(uint32_t function, uint32_t index); static inline struct kvm_cpuid_entry2 * 
-find_cpuid_entry(struct kvm_cpuid2 *cpuid, uint32_t function) +kvm_get_supported_cpuid_entry(uint32_t function) { - return find_cpuid_index_entry(cpuid, function, 0); + return kvm_get_supported_cpuid_index(function, 0); } struct kvm_vm *vm_create_default(uint32_t vcpuid, void *guest_code); void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code); +typedef void (*vmx_guest_code_t)(vm_vaddr_t vmxon_vaddr, + vm_paddr_t vmxon_paddr, + vm_vaddr_t vmcs_vaddr, + vm_paddr_t vmcs_paddr); + struct kvm_userspace_memory_region * kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end); diff --git a/tools/testing/selftests/kvm/include/vmx.h b/tools/testing/selftests/kvm/include/vmx.h new file mode 100644 index 000000000000..6ed8499807fd --- /dev/null +++ b/tools/testing/selftests/kvm/include/vmx.h @@ -0,0 +1,494 @@ +/* + * tools/testing/selftests/kvm/include/vmx.h + * + * Copyright (C) 2018, Google LLC. + * + * This work is licensed under the terms of the GNU GPL, version 2. + * + */ + +#ifndef SELFTEST_KVM_VMX_H +#define SELFTEST_KVM_VMX_H + +#include <stdint.h> +#include "x86.h" + +#define CPUID_VMX_BIT 5 + +#define CPUID_VMX (1 << 5) + +/* + * Definitions of Primary Processor-Based VM-Execution Controls. + */ +#define CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004 +#define CPU_BASED_USE_TSC_OFFSETING 0x00000008 +#define CPU_BASED_HLT_EXITING 0x00000080 +#define CPU_BASED_INVLPG_EXITING 0x00000200 +#define CPU_BASED_MWAIT_EXITING 0x00000400 +#define CPU_BASED_RDPMC_EXITING 0x00000800 +#define CPU_BASED_RDTSC_EXITING 0x00001000 +#define CPU_BASED_CR3_LOAD_EXITING 0x00008000 +#define CPU_BASED_CR3_STORE_EXITING 0x00010000 +#define CPU_BASED_CR8_LOAD_EXITING 0x00080000 +#define CPU_BASED_CR8_STORE_EXITING 0x00100000 +#define CPU_BASED_TPR_SHADOW 0x00200000 +#define CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000 +#define CPU_BASED_MOV_DR_EXITING 0x00800000 +#define CPU_BASED_UNCOND_IO_EXITING 0x01000000 +#define CPU_BASED_USE_IO_BITMAPS 0x02000000 +#define CPU_BASED_MONITOR_TRAP 0x08000000 +#define CPU_BASED_USE_MSR_BITMAPS 0x10000000 +#define CPU_BASED_MONITOR_EXITING 0x20000000 +#define CPU_BASED_PAUSE_EXITING 0x40000000 +#define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000 + +#define CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x0401e172 + +/* + * Definitions of Secondary Processor-Based VM-Execution Controls. 
+ */ +#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001 +#define SECONDARY_EXEC_ENABLE_EPT 0x00000002 +#define SECONDARY_EXEC_DESC 0x00000004 +#define SECONDARY_EXEC_RDTSCP 0x00000008 +#define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010 +#define SECONDARY_EXEC_ENABLE_VPID 0x00000020 +#define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 +#define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 +#define SECONDARY_EXEC_APIC_REGISTER_VIRT 0x00000100 +#define SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY 0x00000200 +#define SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400 +#define SECONDARY_EXEC_RDRAND_EXITING 0x00000800 +#define SECONDARY_EXEC_ENABLE_INVPCID 0x00001000 +#define SECONDARY_EXEC_ENABLE_VMFUNC 0x00002000 +#define SECONDARY_EXEC_SHADOW_VMCS 0x00004000 +#define SECONDARY_EXEC_RDSEED_EXITING 0x00010000 +#define SECONDARY_EXEC_ENABLE_PML 0x00020000 +#define SECONDARY_EPT_VE 0x00040000 +#define SECONDARY_ENABLE_XSAV_RESTORE 0x00100000 +#define SECONDARY_EXEC_TSC_SCALING 0x02000000 + +#define PIN_BASED_EXT_INTR_MASK 0x00000001 +#define PIN_BASED_NMI_EXITING 0x00000008 +#define PIN_BASED_VIRTUAL_NMIS 0x00000020 +#define PIN_BASED_VMX_PREEMPTION_TIMER 0x00000040 +#define PIN_BASED_POSTED_INTR 0x00000080 + +#define PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x00000016 + +#define VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000004 +#define VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200 +#define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000 +#define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000 +#define VM_EXIT_SAVE_IA32_PAT 0x00040000 +#define VM_EXIT_LOAD_IA32_PAT 0x00080000 +#define VM_EXIT_SAVE_IA32_EFER 0x00100000 +#define VM_EXIT_LOAD_IA32_EFER 0x00200000 +#define VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000 + +#define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR 0x00036dff + +#define VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004 +#define VM_ENTRY_IA32E_MODE 0x00000200 +#define VM_ENTRY_SMM 0x00000400 +#define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800 +#define VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL 0x00002000 +#define VM_ENTRY_LOAD_IA32_PAT 0x00004000 +#define VM_ENTRY_LOAD_IA32_EFER 0x00008000 + +#define VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR 0x000011ff + +#define VMX_MISC_PREEMPTION_TIMER_RATE_MASK 0x0000001f +#define VMX_MISC_SAVE_EFER_LMA 0x00000020 + +#define EXIT_REASON_FAILED_VMENTRY 0x80000000 +#define EXIT_REASON_EXCEPTION_NMI 0 +#define EXIT_REASON_EXTERNAL_INTERRUPT 1 +#define EXIT_REASON_TRIPLE_FAULT 2 +#define EXIT_REASON_PENDING_INTERRUPT 7 +#define EXIT_REASON_NMI_WINDOW 8 +#define EXIT_REASON_TASK_SWITCH 9 +#define EXIT_REASON_CPUID 10 +#define EXIT_REASON_HLT 12 +#define EXIT_REASON_INVD 13 +#define EXIT_REASON_INVLPG 14 +#define EXIT_REASON_RDPMC 15 +#define EXIT_REASON_RDTSC 16 +#define EXIT_REASON_VMCALL 18 +#define EXIT_REASON_VMCLEAR 19 +#define EXIT_REASON_VMLAUNCH 20 +#define EXIT_REASON_VMPTRLD 21 +#define EXIT_REASON_VMPTRST 22 +#define EXIT_REASON_VMREAD 23 +#define EXIT_REASON_VMRESUME 24 +#define EXIT_REASON_VMWRITE 25 +#define EXIT_REASON_VMOFF 26 +#define EXIT_REASON_VMON 27 +#define EXIT_REASON_CR_ACCESS 28 +#define EXIT_REASON_DR_ACCESS 29 +#define EXIT_REASON_IO_INSTRUCTION 30 +#define EXIT_REASON_MSR_READ 31 +#define EXIT_REASON_MSR_WRITE 32 +#define EXIT_REASON_INVALID_STATE 33 +#define EXIT_REASON_MWAIT_INSTRUCTION 36 +#define EXIT_REASON_MONITOR_INSTRUCTION 39 +#define EXIT_REASON_PAUSE_INSTRUCTION 40 +#define EXIT_REASON_MCE_DURING_VMENTRY 41 +#define EXIT_REASON_TPR_BELOW_THRESHOLD 43 +#define EXIT_REASON_APIC_ACCESS 44 +#define EXIT_REASON_EOI_INDUCED 45 +#define EXIT_REASON_EPT_VIOLATION 48 +#define 
EXIT_REASON_EPT_MISCONFIG 49 +#define EXIT_REASON_INVEPT 50 +#define EXIT_REASON_RDTSCP 51 +#define EXIT_REASON_PREEMPTION_TIMER 52 +#define EXIT_REASON_INVVPID 53 +#define EXIT_REASON_WBINVD 54 +#define EXIT_REASON_XSETBV 55 +#define EXIT_REASON_APIC_WRITE 56 +#define EXIT_REASON_INVPCID 58 +#define EXIT_REASON_PML_FULL 62 +#define EXIT_REASON_XSAVES 63 +#define EXIT_REASON_XRSTORS 64 +#define LAST_EXIT_REASON 64 + +enum vmcs_field { + VIRTUAL_PROCESSOR_ID = 0x00000000, + POSTED_INTR_NV = 0x00000002, + GUEST_ES_SELECTOR = 0x00000800, + GUEST_CS_SELECTOR = 0x00000802, + GUEST_SS_SELECTOR = 0x00000804, + GUEST_DS_SELECTOR = 0x00000806, + GUEST_FS_SELECTOR = 0x00000808, + GUEST_GS_SELECTOR = 0x0000080a, + GUEST_LDTR_SELECTOR = 0x0000080c, + GUEST_TR_SELECTOR = 0x0000080e, + GUEST_INTR_STATUS = 0x00000810, + GUEST_PML_INDEX = 0x00000812, + HOST_ES_SELECTOR = 0x00000c00, + HOST_CS_SELECTOR = 0x00000c02, + HOST_SS_SELECTOR = 0x00000c04, + HOST_DS_SELECTOR = 0x00000c06, + HOST_FS_SELECTOR = 0x00000c08, + HOST_GS_SELECTOR = 0x00000c0a, + HOST_TR_SELECTOR = 0x00000c0c, + IO_BITMAP_A = 0x00002000, + IO_BITMAP_A_HIGH = 0x00002001, + IO_BITMAP_B = 0x00002002, + IO_BITMAP_B_HIGH = 0x00002003, + MSR_BITMAP = 0x00002004, + MSR_BITMAP_HIGH = 0x00002005, + VM_EXIT_MSR_STORE_ADDR = 0x00002006, + VM_EXIT_MSR_STORE_ADDR_HIGH = 0x00002007, + VM_EXIT_MSR_LOAD_ADDR = 0x00002008, + VM_EXIT_MSR_LOAD_ADDR_HIGH = 0x00002009, + VM_ENTRY_MSR_LOAD_ADDR = 0x0000200a, + VM_ENTRY_MSR_LOAD_ADDR_HIGH = 0x0000200b, + PML_ADDRESS = 0x0000200e, + PML_ADDRESS_HIGH = 0x0000200f, + TSC_OFFSET = 0x00002010, + TSC_OFFSET_HIGH = 0x00002011, + VIRTUAL_APIC_PAGE_ADDR = 0x00002012, + VIRTUAL_APIC_PAGE_ADDR_HIGH = 0x00002013, + APIC_ACCESS_ADDR = 0x00002014, + APIC_ACCESS_ADDR_HIGH = 0x00002015, + POSTED_INTR_DESC_ADDR = 0x00002016, + POSTED_INTR_DESC_ADDR_HIGH = 0x00002017, + EPT_POINTER = 0x0000201a, + EPT_POINTER_HIGH = 0x0000201b, + EOI_EXIT_BITMAP0 = 0x0000201c, + EOI_EXIT_BITMAP0_HIGH = 0x0000201d, + EOI_EXIT_BITMAP1 = 0x0000201e, + EOI_EXIT_BITMAP1_HIGH = 0x0000201f, + EOI_EXIT_BITMAP2 = 0x00002020, + EOI_EXIT_BITMAP2_HIGH = 0x00002021, + EOI_EXIT_BITMAP3 = 0x00002022, + EOI_EXIT_BITMAP3_HIGH = 0x00002023, + VMREAD_BITMAP = 0x00002026, + VMREAD_BITMAP_HIGH = 0x00002027, + VMWRITE_BITMAP = 0x00002028, + VMWRITE_BITMAP_HIGH = 0x00002029, + XSS_EXIT_BITMAP = 0x0000202C, + XSS_EXIT_BITMAP_HIGH = 0x0000202D, + TSC_MULTIPLIER = 0x00002032, + TSC_MULTIPLIER_HIGH = 0x00002033, + GUEST_PHYSICAL_ADDRESS = 0x00002400, + GUEST_PHYSICAL_ADDRESS_HIGH = 0x00002401, + VMCS_LINK_POINTER = 0x00002800, + VMCS_LINK_POINTER_HIGH = 0x00002801, + GUEST_IA32_DEBUGCTL = 0x00002802, + GUEST_IA32_DEBUGCTL_HIGH = 0x00002803, + GUEST_IA32_PAT = 0x00002804, + GUEST_IA32_PAT_HIGH = 0x00002805, + GUEST_IA32_EFER = 0x00002806, + GUEST_IA32_EFER_HIGH = 0x00002807, + GUEST_IA32_PERF_GLOBAL_CTRL = 0x00002808, + GUEST_IA32_PERF_GLOBAL_CTRL_HIGH= 0x00002809, + GUEST_PDPTR0 = 0x0000280a, + GUEST_PDPTR0_HIGH = 0x0000280b, + GUEST_PDPTR1 = 0x0000280c, + GUEST_PDPTR1_HIGH = 0x0000280d, + GUEST_PDPTR2 = 0x0000280e, + GUEST_PDPTR2_HIGH = 0x0000280f, + GUEST_PDPTR3 = 0x00002810, + GUEST_PDPTR3_HIGH = 0x00002811, + GUEST_BNDCFGS = 0x00002812, + GUEST_BNDCFGS_HIGH = 0x00002813, + HOST_IA32_PAT = 0x00002c00, + HOST_IA32_PAT_HIGH = 0x00002c01, + HOST_IA32_EFER = 0x00002c02, + HOST_IA32_EFER_HIGH = 0x00002c03, + HOST_IA32_PERF_GLOBAL_CTRL = 0x00002c04, + HOST_IA32_PERF_GLOBAL_CTRL_HIGH = 0x00002c05, + PIN_BASED_VM_EXEC_CONTROL = 0x00004000, + CPU_BASED_VM_EXEC_CONTROL = 
0x00004002, + EXCEPTION_BITMAP = 0x00004004, + PAGE_FAULT_ERROR_CODE_MASK = 0x00004006, + PAGE_FAULT_ERROR_CODE_MATCH = 0x00004008, + CR3_TARGET_COUNT = 0x0000400a, + VM_EXIT_CONTROLS = 0x0000400c, + VM_EXIT_MSR_STORE_COUNT = 0x0000400e, + VM_EXIT_MSR_LOAD_COUNT = 0x00004010, + VM_ENTRY_CONTROLS = 0x00004012, + VM_ENTRY_MSR_LOAD_COUNT = 0x00004014, + VM_ENTRY_INTR_INFO_FIELD = 0x00004016, + VM_ENTRY_EXCEPTION_ERROR_CODE = 0x00004018, + VM_ENTRY_INSTRUCTION_LEN = 0x0000401a, + TPR_THRESHOLD = 0x0000401c, + SECONDARY_VM_EXEC_CONTROL = 0x0000401e, + PLE_GAP = 0x00004020, + PLE_WINDOW = 0x00004022, + VM_INSTRUCTION_ERROR = 0x00004400, + VM_EXIT_REASON = 0x00004402, + VM_EXIT_INTR_INFO = 0x00004404, + VM_EXIT_INTR_ERROR_CODE = 0x00004406, + IDT_VECTORING_INFO_FIELD = 0x00004408, + IDT_VECTORING_ERROR_CODE = 0x0000440a, + VM_EXIT_INSTRUCTION_LEN = 0x0000440c, + VMX_INSTRUCTION_INFO = 0x0000440e, + GUEST_ES_LIMIT = 0x00004800, + GUEST_CS_LIMIT = 0x00004802, + GUEST_SS_LIMIT = 0x00004804, + GUEST_DS_LIMIT = 0x00004806, + GUEST_FS_LIMIT = 0x00004808, + GUEST_GS_LIMIT = 0x0000480a, + GUEST_LDTR_LIMIT = 0x0000480c, + GUEST_TR_LIMIT = 0x0000480e, + GUEST_GDTR_LIMIT = 0x00004810, + GUEST_IDTR_LIMIT = 0x00004812, + GUEST_ES_AR_BYTES = 0x00004814, + GUEST_CS_AR_BYTES = 0x00004816, + GUEST_SS_AR_BYTES = 0x00004818, + GUEST_DS_AR_BYTES = 0x0000481a, + GUEST_FS_AR_BYTES = 0x0000481c, + GUEST_GS_AR_BYTES = 0x0000481e, + GUEST_LDTR_AR_BYTES = 0x00004820, + GUEST_TR_AR_BYTES = 0x00004822, + GUEST_INTERRUPTIBILITY_INFO = 0x00004824, + GUEST_ACTIVITY_STATE = 0X00004826, + GUEST_SYSENTER_CS = 0x0000482A, + VMX_PREEMPTION_TIMER_VALUE = 0x0000482E, + HOST_IA32_SYSENTER_CS = 0x00004c00, + CR0_GUEST_HOST_MASK = 0x00006000, + CR4_GUEST_HOST_MASK = 0x00006002, + CR0_READ_SHADOW = 0x00006004, + CR4_READ_SHADOW = 0x00006006, + CR3_TARGET_VALUE0 = 0x00006008, + CR3_TARGET_VALUE1 = 0x0000600a, + CR3_TARGET_VALUE2 = 0x0000600c, + CR3_TARGET_VALUE3 = 0x0000600e, + EXIT_QUALIFICATION = 0x00006400, + GUEST_LINEAR_ADDRESS = 0x0000640a, + GUEST_CR0 = 0x00006800, + GUEST_CR3 = 0x00006802, + GUEST_CR4 = 0x00006804, + GUEST_ES_BASE = 0x00006806, + GUEST_CS_BASE = 0x00006808, + GUEST_SS_BASE = 0x0000680a, + GUEST_DS_BASE = 0x0000680c, + GUEST_FS_BASE = 0x0000680e, + GUEST_GS_BASE = 0x00006810, + GUEST_LDTR_BASE = 0x00006812, + GUEST_TR_BASE = 0x00006814, + GUEST_GDTR_BASE = 0x00006816, + GUEST_IDTR_BASE = 0x00006818, + GUEST_DR7 = 0x0000681a, + GUEST_RSP = 0x0000681c, + GUEST_RIP = 0x0000681e, + GUEST_RFLAGS = 0x00006820, + GUEST_PENDING_DBG_EXCEPTIONS = 0x00006822, + GUEST_SYSENTER_ESP = 0x00006824, + GUEST_SYSENTER_EIP = 0x00006826, + HOST_CR0 = 0x00006c00, + HOST_CR3 = 0x00006c02, + HOST_CR4 = 0x00006c04, + HOST_FS_BASE = 0x00006c06, + HOST_GS_BASE = 0x00006c08, + HOST_TR_BASE = 0x00006c0a, + HOST_GDTR_BASE = 0x00006c0c, + HOST_IDTR_BASE = 0x00006c0e, + HOST_IA32_SYSENTER_ESP = 0x00006c10, + HOST_IA32_SYSENTER_EIP = 0x00006c12, + HOST_RSP = 0x00006c14, + HOST_RIP = 0x00006c16, +}; + +struct vmx_msr_entry { + uint32_t index; + uint32_t reserved; + uint64_t value; +} __attribute__ ((aligned(16))); + +static inline int vmxon(uint64_t phys) +{ + uint8_t ret; + + __asm__ __volatile__ ("vmxon %[pa]; setna %[ret]" + : [ret]"=rm"(ret) + : [pa]"m"(phys) + : "cc", "memory"); + + return ret; +} + +static inline void vmxoff(void) +{ + __asm__ __volatile__("vmxoff"); +} + +static inline int vmclear(uint64_t vmcs_pa) +{ + uint8_t ret; + + __asm__ __volatile__ ("vmclear %[pa]; setna %[ret]" + : [ret]"=rm"(ret) + : [pa]"m"(vmcs_pa) + : "cc", 
"memory"); + + return ret; +} + +static inline int vmptrld(uint64_t vmcs_pa) +{ + uint8_t ret; + + __asm__ __volatile__ ("vmptrld %[pa]; setna %[ret]" + : [ret]"=rm"(ret) + : [pa]"m"(vmcs_pa) + : "cc", "memory"); + + return ret; +} + +/* + * No guest state (e.g. GPRs) is established by this vmlaunch. + */ +static inline int vmlaunch(void) +{ + int ret; + + __asm__ __volatile__("push %%rbp;" + "push %%rcx;" + "push %%rdx;" + "push %%rsi;" + "push %%rdi;" + "push $0;" + "vmwrite %%rsp, %[host_rsp];" + "lea 1f(%%rip), %%rax;" + "vmwrite %%rax, %[host_rip];" + "vmlaunch;" + "incq (%%rsp);" + "1: pop %%rax;" + "pop %%rdi;" + "pop %%rsi;" + "pop %%rdx;" + "pop %%rcx;" + "pop %%rbp;" + : [ret]"=&a"(ret) + : [host_rsp]"r"((uint64_t)HOST_RSP), + [host_rip]"r"((uint64_t)HOST_RIP) + : "memory", "cc", "rbx", "r8", "r9", "r10", + "r11", "r12", "r13", "r14", "r15"); + return ret; +} + +/* + * No guest state (e.g. GPRs) is established by this vmresume. + */ +static inline int vmresume(void) +{ + int ret; + + __asm__ __volatile__("push %%rbp;" + "push %%rcx;" + "push %%rdx;" + "push %%rsi;" + "push %%rdi;" + "push $0;" + "vmwrite %%rsp, %[host_rsp];" + "lea 1f(%%rip), %%rax;" + "vmwrite %%rax, %[host_rip];" + "vmresume;" + "incq (%%rsp);" + "1: pop %%rax;" + "pop %%rdi;" + "pop %%rsi;" + "pop %%rdx;" + "pop %%rcx;" + "pop %%rbp;" + : [ret]"=&a"(ret) + : [host_rsp]"r"((uint64_t)HOST_RSP), + [host_rip]"r"((uint64_t)HOST_RIP) + : "memory", "cc", "rbx", "r8", "r9", "r10", + "r11", "r12", "r13", "r14", "r15"); + return ret; +} + +static inline int vmread(uint64_t encoding, uint64_t *value) +{ + uint64_t tmp; + uint8_t ret; + + __asm__ __volatile__("vmread %[encoding], %[value]; setna %[ret]" + : [value]"=rm"(tmp), [ret]"=rm"(ret) + : [encoding]"r"(encoding) + : "cc", "memory"); + + *value = tmp; + return ret; +} + +/* + * A wrapper around vmread that ignores errors and returns zero if the + * vmread instruction fails. + */ +static inline uint64_t vmreadz(uint64_t encoding) +{ + uint64_t value = 0; + vmread(encoding, &value); + return value; +} + +static inline int vmwrite(uint64_t encoding, uint64_t value) +{ + uint8_t ret; + + __asm__ __volatile__ ("vmwrite %[value], %[encoding]; setna %[ret]" + : [ret]"=rm"(ret) + : [value]"rm"(value), [encoding]"r"(encoding) + : "cc", "memory"); + + return ret; +} + +static inline uint32_t vmcs_revision(void) +{ + return rdmsr(MSR_IA32_VMX_BASIC); +} + +void prepare_for_vmx_operation(void); +void prepare_vmcs(void *guest_rip, void *guest_rsp); +struct kvm_vm *vm_create_default_vmx(uint32_t vcpuid, + vmx_guest_code_t guest_code); + +#endif /* !SELFTEST_KVM_VMX_H */ diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 7ca1bb40c498..2cedfda181d4 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -378,7 +378,7 @@ int kvm_memcmp_hva_gva(void *hva, * complicated. This function uses a reasonable default length for * the array and performs the appropriate allocation. */ -struct kvm_cpuid2 *allocate_kvm_cpuid2(void) +static struct kvm_cpuid2 *allocate_kvm_cpuid2(void) { struct kvm_cpuid2 *cpuid; int nent = 100; @@ -402,17 +402,21 @@ struct kvm_cpuid2 *allocate_kvm_cpuid2(void) * Input Args: None * * Output Args: - * cpuid - The supported KVM CPUID * - * Return: void + * Return: The supported KVM CPUID * * Get the guest CPUID supported by KVM. 
*/ -void kvm_get_supported_cpuid(struct kvm_cpuid2 *cpuid) +struct kvm_cpuid2 *kvm_get_supported_cpuid(void) { + static struct kvm_cpuid2 *cpuid; int ret; int kvm_fd; + if (cpuid) + return cpuid; + + cpuid = allocate_kvm_cpuid2(); kvm_fd = open(KVM_DEV_PATH, O_RDONLY); TEST_ASSERT(kvm_fd >= 0, "open %s failed, rc: %i errno: %i", KVM_DEV_PATH, kvm_fd, errno); @@ -422,6 +426,7 @@ void kvm_get_supported_cpuid(struct kvm_cpuid2 *cpuid) ret, errno); close(kvm_fd); + return cpuid; } /* Locate a cpuid entry. @@ -435,12 +440,13 @@ void kvm_get_supported_cpuid(struct kvm_cpuid2 *cpuid) * Return: A pointer to the cpuid entry. Never returns NULL. */ struct kvm_cpuid_entry2 * -find_cpuid_index_entry(struct kvm_cpuid2 *cpuid, uint32_t function, - uint32_t index) +kvm_get_supported_cpuid_index(uint32_t function, uint32_t index) { + struct kvm_cpuid2 *cpuid; struct kvm_cpuid_entry2 *entry = NULL; int i; + cpuid = kvm_get_supported_cpuid(); for (i = 0; i < cpuid->nent; i++) { if (cpuid->entries[i].function == function && cpuid->entries[i].index == index) { @@ -1435,7 +1441,7 @@ vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, sparsebit_idx_t pg; TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address " - "not divisable by page size.\n" + "not divisible by page size.\n" " paddr_min: 0x%lx page_size: 0x%x", paddr_min, vm->page_size); diff --git a/tools/testing/selftests/kvm/lib/sparsebit.c b/tools/testing/selftests/kvm/lib/sparsebit.c index 0c5cf3e0cb6f..b132bc95d183 100644 --- a/tools/testing/selftests/kvm/lib/sparsebit.c +++ b/tools/testing/selftests/kvm/lib/sparsebit.c @@ -121,7 +121,7 @@ * avoided by moving the setting of the nodes mask bits into * the previous nodes num_after setting. * - * + Node starting index is evenly divisable by the number of bits + * + Node starting index is evenly divisible by the number of bits * within a nodes mask member. * * + Nodes never represent a range of bits that wrap around the @@ -1741,7 +1741,7 @@ void sparsebit_validate_internal(struct sparsebit *s) /* Validate node index is divisible by the mask size */ if (nodep->idx % MASK_BITS) { - fprintf(stderr, "Node index not divisable by " + fprintf(stderr, "Node index not divisible by " "mask size,\n" " nodep: %p nodep->idx: 0x%lx " "MASK_BITS: %lu\n", diff --git a/tools/testing/selftests/kvm/lib/vmx.c b/tools/testing/selftests/kvm/lib/vmx.c new file mode 100644 index 000000000000..0231bc0aae7b --- /dev/null +++ b/tools/testing/selftests/kvm/lib/vmx.c @@ -0,0 +1,243 @@ +/* + * tools/testing/selftests/kvm/lib/x86.c + * + * Copyright (C) 2018, Google LLC. + * + * This work is licensed under the terms of the GNU GPL, version 2. + */ + +#define _GNU_SOURCE /* for program_invocation_name */ + +#include "test_util.h" +#include "kvm_util.h" +#include "x86.h" +#include "vmx.h" + +/* Create a default VM for VMX tests. + * + * Input Args: + * vcpuid - The id of the single VCPU to add to the VM. + * guest_code - The vCPU's entry point + * + * Output Args: None + * + * Return: + * Pointer to opaque structure that describes the created VM. + */ +struct kvm_vm * +vm_create_default_vmx(uint32_t vcpuid, vmx_guest_code_t guest_code) +{ + struct kvm_cpuid2 *cpuid; + struct kvm_vm *vm; + vm_vaddr_t vmxon_vaddr; + vm_paddr_t vmxon_paddr; + vm_vaddr_t vmcs_vaddr; + vm_paddr_t vmcs_paddr; + + vm = vm_create_default(vcpuid, (void *) guest_code); + + /* Enable nesting in CPUID */ + vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid()); + + /* Setup of a region of guest memory for the vmxon region. 
*/ + vmxon_vaddr = vm_vaddr_alloc(vm, getpagesize(), 0, 0, 0); + vmxon_paddr = addr_gva2gpa(vm, vmxon_vaddr); + + /* Setup of a region of guest memory for a vmcs. */ + vmcs_vaddr = vm_vaddr_alloc(vm, getpagesize(), 0, 0, 0); + vmcs_paddr = addr_gva2gpa(vm, vmcs_vaddr); + + vcpu_args_set(vm, vcpuid, 4, vmxon_vaddr, vmxon_paddr, vmcs_vaddr, + vmcs_paddr); + + return vm; +} + +void prepare_for_vmx_operation(void) +{ + uint64_t feature_control; + uint64_t required; + unsigned long cr0; + unsigned long cr4; + + /* + * Ensure bits in CR0 and CR4 are valid in VMX operation: + * - Bit X is 1 in _FIXED0: bit X is fixed to 1 in CRx. + * - Bit X is 0 in _FIXED1: bit X is fixed to 0 in CRx. + */ + __asm__ __volatile__("mov %%cr0, %0" : "=r"(cr0) : : "memory"); + cr0 &= rdmsr(MSR_IA32_VMX_CR0_FIXED1); + cr0 |= rdmsr(MSR_IA32_VMX_CR0_FIXED0); + __asm__ __volatile__("mov %0, %%cr0" : : "r"(cr0) : "memory"); + + __asm__ __volatile__("mov %%cr4, %0" : "=r"(cr4) : : "memory"); + cr4 &= rdmsr(MSR_IA32_VMX_CR4_FIXED1); + cr4 |= rdmsr(MSR_IA32_VMX_CR4_FIXED0); + /* Enable VMX operation */ + cr4 |= X86_CR4_VMXE; + __asm__ __volatile__("mov %0, %%cr4" : : "r"(cr4) : "memory"); + + /* + * Configure IA32_FEATURE_CONTROL MSR to allow VMXON: + * Bit 0: Lock bit. If clear, VMXON causes a #GP. + * Bit 2: Enables VMXON outside of SMX operation. If clear, VMXON + * outside of SMX causes a #GP. + */ + required = FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; + required |= FEATURE_CONTROL_LOCKED; + feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL); + if ((feature_control & required) != required) + wrmsr(MSR_IA32_FEATURE_CONTROL, feature_control | required); +} + +/* + * Initialize the control fields to the most basic settings possible. + */ +static inline void init_vmcs_control_fields(void) +{ + vmwrite(VIRTUAL_PROCESSOR_ID, 0); + vmwrite(POSTED_INTR_NV, 0); + + vmwrite(PIN_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_PINBASED_CTLS)); + vmwrite(CPU_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_PROCBASED_CTLS)); + vmwrite(EXCEPTION_BITMAP, 0); + vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0); + vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, -1); /* Never match */ + vmwrite(CR3_TARGET_COUNT, 0); + vmwrite(VM_EXIT_CONTROLS, rdmsr(MSR_IA32_VMX_EXIT_CTLS) | + VM_EXIT_HOST_ADDR_SPACE_SIZE); /* 64-bit host */ + vmwrite(VM_EXIT_MSR_STORE_COUNT, 0); + vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0); + vmwrite(VM_ENTRY_CONTROLS, rdmsr(MSR_IA32_VMX_ENTRY_CTLS) | + VM_ENTRY_IA32E_MODE); /* 64-bit guest */ + vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0); + vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0); + vmwrite(TPR_THRESHOLD, 0); + vmwrite(SECONDARY_VM_EXEC_CONTROL, 0); + + vmwrite(CR0_GUEST_HOST_MASK, 0); + vmwrite(CR4_GUEST_HOST_MASK, 0); + vmwrite(CR0_READ_SHADOW, get_cr0()); + vmwrite(CR4_READ_SHADOW, get_cr4()); +} + +/* + * Initialize the host state fields based on the current host state, with + * the exception of HOST_RSP and HOST_RIP, which should be set by vmlaunch + * or vmresume. 
+ */ +static inline void init_vmcs_host_state(void) +{ + uint32_t exit_controls = vmreadz(VM_EXIT_CONTROLS); + + vmwrite(HOST_ES_SELECTOR, get_es()); + vmwrite(HOST_CS_SELECTOR, get_cs()); + vmwrite(HOST_SS_SELECTOR, get_ss()); + vmwrite(HOST_DS_SELECTOR, get_ds()); + vmwrite(HOST_FS_SELECTOR, get_fs()); + vmwrite(HOST_GS_SELECTOR, get_gs()); + vmwrite(HOST_TR_SELECTOR, get_tr()); + + if (exit_controls & VM_EXIT_LOAD_IA32_PAT) + vmwrite(HOST_IA32_PAT, rdmsr(MSR_IA32_CR_PAT)); + if (exit_controls & VM_EXIT_LOAD_IA32_EFER) + vmwrite(HOST_IA32_EFER, rdmsr(MSR_EFER)); + if (exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) + vmwrite(HOST_IA32_PERF_GLOBAL_CTRL, + rdmsr(MSR_CORE_PERF_GLOBAL_CTRL)); + + vmwrite(HOST_IA32_SYSENTER_CS, rdmsr(MSR_IA32_SYSENTER_CS)); + + vmwrite(HOST_CR0, get_cr0()); + vmwrite(HOST_CR3, get_cr3()); + vmwrite(HOST_CR4, get_cr4()); + vmwrite(HOST_FS_BASE, rdmsr(MSR_FS_BASE)); + vmwrite(HOST_GS_BASE, rdmsr(MSR_GS_BASE)); + vmwrite(HOST_TR_BASE, + get_desc64_base((struct desc64 *)(get_gdt_base() + get_tr()))); + vmwrite(HOST_GDTR_BASE, get_gdt_base()); + vmwrite(HOST_IDTR_BASE, get_idt_base()); + vmwrite(HOST_IA32_SYSENTER_ESP, rdmsr(MSR_IA32_SYSENTER_ESP)); + vmwrite(HOST_IA32_SYSENTER_EIP, rdmsr(MSR_IA32_SYSENTER_EIP)); +} + +/* + * Initialize the guest state fields essentially as a clone of + * the host state fields. Some host state fields have fixed + * values, and we set the corresponding guest state fields accordingly. + */ +static inline void init_vmcs_guest_state(void *rip, void *rsp) +{ + vmwrite(GUEST_ES_SELECTOR, vmreadz(HOST_ES_SELECTOR)); + vmwrite(GUEST_CS_SELECTOR, vmreadz(HOST_CS_SELECTOR)); + vmwrite(GUEST_SS_SELECTOR, vmreadz(HOST_SS_SELECTOR)); + vmwrite(GUEST_DS_SELECTOR, vmreadz(HOST_DS_SELECTOR)); + vmwrite(GUEST_FS_SELECTOR, vmreadz(HOST_FS_SELECTOR)); + vmwrite(GUEST_GS_SELECTOR, vmreadz(HOST_GS_SELECTOR)); + vmwrite(GUEST_LDTR_SELECTOR, 0); + vmwrite(GUEST_TR_SELECTOR, vmreadz(HOST_TR_SELECTOR)); + vmwrite(GUEST_INTR_STATUS, 0); + vmwrite(GUEST_PML_INDEX, 0); + + vmwrite(VMCS_LINK_POINTER, -1ll); + vmwrite(GUEST_IA32_DEBUGCTL, 0); + vmwrite(GUEST_IA32_PAT, vmreadz(HOST_IA32_PAT)); + vmwrite(GUEST_IA32_EFER, vmreadz(HOST_IA32_EFER)); + vmwrite(GUEST_IA32_PERF_GLOBAL_CTRL, + vmreadz(HOST_IA32_PERF_GLOBAL_CTRL)); + + vmwrite(GUEST_ES_LIMIT, -1); + vmwrite(GUEST_CS_LIMIT, -1); + vmwrite(GUEST_SS_LIMIT, -1); + vmwrite(GUEST_DS_LIMIT, -1); + vmwrite(GUEST_FS_LIMIT, -1); + vmwrite(GUEST_GS_LIMIT, -1); + vmwrite(GUEST_LDTR_LIMIT, -1); + vmwrite(GUEST_TR_LIMIT, 0x67); + vmwrite(GUEST_GDTR_LIMIT, 0xffff); + vmwrite(GUEST_IDTR_LIMIT, 0xffff); + vmwrite(GUEST_ES_AR_BYTES, + vmreadz(GUEST_ES_SELECTOR) == 0 ? 0x10000 : 0xc093); + vmwrite(GUEST_CS_AR_BYTES, 0xa09b); + vmwrite(GUEST_SS_AR_BYTES, 0xc093); + vmwrite(GUEST_DS_AR_BYTES, + vmreadz(GUEST_DS_SELECTOR) == 0 ? 0x10000 : 0xc093); + vmwrite(GUEST_FS_AR_BYTES, + vmreadz(GUEST_FS_SELECTOR) == 0 ? 0x10000 : 0xc093); + vmwrite(GUEST_GS_AR_BYTES, + vmreadz(GUEST_GS_SELECTOR) == 0 ? 
0x10000 : 0xc093); + vmwrite(GUEST_LDTR_AR_BYTES, 0x10000); + vmwrite(GUEST_TR_AR_BYTES, 0x8b); + vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0); + vmwrite(GUEST_ACTIVITY_STATE, 0); + vmwrite(GUEST_SYSENTER_CS, vmreadz(HOST_IA32_SYSENTER_CS)); + vmwrite(VMX_PREEMPTION_TIMER_VALUE, 0); + + vmwrite(GUEST_CR0, vmreadz(HOST_CR0)); + vmwrite(GUEST_CR3, vmreadz(HOST_CR3)); + vmwrite(GUEST_CR4, vmreadz(HOST_CR4)); + vmwrite(GUEST_ES_BASE, 0); + vmwrite(GUEST_CS_BASE, 0); + vmwrite(GUEST_SS_BASE, 0); + vmwrite(GUEST_DS_BASE, 0); + vmwrite(GUEST_FS_BASE, vmreadz(HOST_FS_BASE)); + vmwrite(GUEST_GS_BASE, vmreadz(HOST_GS_BASE)); + vmwrite(GUEST_LDTR_BASE, 0); + vmwrite(GUEST_TR_BASE, vmreadz(HOST_TR_BASE)); + vmwrite(GUEST_GDTR_BASE, vmreadz(HOST_GDTR_BASE)); + vmwrite(GUEST_IDTR_BASE, vmreadz(HOST_IDTR_BASE)); + vmwrite(GUEST_DR7, 0x400); + vmwrite(GUEST_RSP, (uint64_t)rsp); + vmwrite(GUEST_RIP, (uint64_t)rip); + vmwrite(GUEST_RFLAGS, 2); + vmwrite(GUEST_PENDING_DBG_EXCEPTIONS, 0); + vmwrite(GUEST_SYSENTER_ESP, vmreadz(HOST_IA32_SYSENTER_ESP)); + vmwrite(GUEST_SYSENTER_EIP, vmreadz(HOST_IA32_SYSENTER_EIP)); +} + +void prepare_vmcs(void *guest_rip, void *guest_rsp) +{ + init_vmcs_control_fields(); + init_vmcs_host_state(); + init_vmcs_guest_state(guest_rip, guest_rsp); +} diff --git a/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c new file mode 100644 index 000000000000..8f7f62093add --- /dev/null +++ b/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c @@ -0,0 +1,231 @@ +/* + * gtests/tests/vmx_tsc_adjust_test.c + * + * Copyright (C) 2018, Google LLC. + * + * This work is licensed under the terms of the GNU GPL, version 2. + * + * + * IA32_TSC_ADJUST test + * + * According to the SDM, "if an execution of WRMSR to the + * IA32_TIME_STAMP_COUNTER MSR adds (or subtracts) value X from the TSC, + * the logical processor also adds (or subtracts) value X from the + * IA32_TSC_ADJUST MSR. + * + * Note that when L1 doesn't intercept writes to IA32_TSC, a + * WRMSR(IA32_TSC) from L2 sets L1's TSC value, not L2's perceived TSC + * value. + * + * This test verifies that this unusual case is handled correctly. + */ + +#include "test_util.h" +#include "kvm_util.h" +#include "x86.h" +#include "vmx.h" + +#include <string.h> +#include <sys/ioctl.h> + +#ifndef MSR_IA32_TSC_ADJUST +#define MSR_IA32_TSC_ADJUST 0x3b +#endif + +#define PAGE_SIZE 4096 +#define VCPU_ID 5 + +#define TSC_ADJUST_VALUE (1ll << 32) +#define TSC_OFFSET_VALUE -(1ll << 48) + +enum { + PORT_ABORT = 0x1000, + PORT_REPORT, + PORT_DONE, +}; + +struct vmx_page { + vm_vaddr_t virt; + vm_paddr_t phys; +}; + +enum { + VMXON_PAGE = 0, + VMCS_PAGE, + MSR_BITMAP_PAGE, + + NUM_VMX_PAGES, +}; + +struct kvm_single_msr { + struct kvm_msrs header; + struct kvm_msr_entry entry; +} __attribute__((packed)); + +/* The virtual machine object. */ +static struct kvm_vm *vm; + +/* Array of vmx_page descriptors that is shared with the guest. 
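To make the SDM rule quoted above concrete: a guest write of W to IA32_TSC moves the counter by W minus the current TSC, and IA32_TSC_ADJUST moves by exactly the same delta. The standalone model below only illustrates that arithmetic (it is not KVM or test code); the constant mirrors TSC_ADJUST_VALUE and the sequence mirrors the first thing l1_guest_code() does below.

#include <stdint.h>
#include <stdio.h>

#define TSC_ADJUST_VALUE (1ll << 32)

/* Toy model of the SDM rule: WRMSR(IA32_TSC, W) adds (W - tsc) to both
 * the TSC and IA32_TSC_ADJUST. */
struct tsc_model {
	int64_t tsc;
	int64_t tsc_adjust;
};

static void wrmsr_ia32_tsc(struct tsc_model *m, int64_t new_value)
{
	int64_t delta = new_value - m->tsc;

	m->tsc += delta;
	m->tsc_adjust += delta;
}

int main(void)
{
	struct tsc_model m = { .tsc = 1000, .tsc_adjust = 0 };

	/* Write back the current TSC minus TSC_ADJUST_VALUE. */
	wrmsr_ia32_tsc(&m, m.tsc - TSC_ADJUST_VALUE);

	/* IA32_TSC_ADJUST is now -TSC_ADJUST_VALUE, which is what
	 * check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE) expects. */
	printf("tsc_adjust = %lld\n", (long long)m.tsc_adjust);
	return 0;
}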
*/ +struct vmx_page *vmx_pages; + +#define exit_to_l0(_port, _arg) do_exit_to_l0(_port, (unsigned long) (_arg)) +static void do_exit_to_l0(uint16_t port, unsigned long arg) +{ + __asm__ __volatile__("in %[port], %%al" + : + : [port]"d"(port), "D"(arg) + : "rax"); +} + + +#define GUEST_ASSERT(_condition) do { \ + if (!(_condition)) \ + exit_to_l0(PORT_ABORT, "Failed guest assert: " #_condition); \ +} while (0) + +static void check_ia32_tsc_adjust(int64_t max) +{ + int64_t adjust; + + adjust = rdmsr(MSR_IA32_TSC_ADJUST); + exit_to_l0(PORT_REPORT, adjust); + GUEST_ASSERT(adjust <= max); +} + +static void l2_guest_code(void) +{ + uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE; + + wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE); + check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE); + + /* Exit to L1 */ + __asm__ __volatile__("vmcall"); +} + +static void l1_guest_code(struct vmx_page *vmx_pages) +{ +#define L2_GUEST_STACK_SIZE 64 + unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; + uint32_t control; + uintptr_t save_cr3; + + GUEST_ASSERT(rdtsc() < TSC_ADJUST_VALUE); + wrmsr(MSR_IA32_TSC, rdtsc() - TSC_ADJUST_VALUE); + check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE); + + prepare_for_vmx_operation(); + + /* Enter VMX root operation. */ + *(uint32_t *)vmx_pages[VMXON_PAGE].virt = vmcs_revision(); + GUEST_ASSERT(!vmxon(vmx_pages[VMXON_PAGE].phys)); + + /* Load a VMCS. */ + *(uint32_t *)vmx_pages[VMCS_PAGE].virt = vmcs_revision(); + GUEST_ASSERT(!vmclear(vmx_pages[VMCS_PAGE].phys)); + GUEST_ASSERT(!vmptrld(vmx_pages[VMCS_PAGE].phys)); + + /* Prepare the VMCS for L2 execution. */ + prepare_vmcs(l2_guest_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]); + control = vmreadz(CPU_BASED_VM_EXEC_CONTROL); + control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETING; + vmwrite(CPU_BASED_VM_EXEC_CONTROL, control); + vmwrite(MSR_BITMAP, vmx_pages[MSR_BITMAP_PAGE].phys); + vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE); + + /* Jump into L2. First, test failure to load guest CR3. */ + save_cr3 = vmreadz(GUEST_CR3); + vmwrite(GUEST_CR3, -1ull); + GUEST_ASSERT(!vmlaunch()); + GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == + (EXIT_REASON_FAILED_VMENTRY | EXIT_REASON_INVALID_STATE)); + check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE); + vmwrite(GUEST_CR3, save_cr3); + + GUEST_ASSERT(!vmlaunch()); + GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL); + + check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE); + + exit_to_l0(PORT_DONE, 0); +} + +static void allocate_vmx_page(struct vmx_page *page) +{ + vm_vaddr_t virt; + + virt = vm_vaddr_alloc(vm, PAGE_SIZE, 0, 0, 0); + memset(addr_gva2hva(vm, virt), 0, PAGE_SIZE); + + page->virt = virt; + page->phys = addr_gva2gpa(vm, virt); +} + +static vm_vaddr_t allocate_vmx_pages(void) +{ + vm_vaddr_t vmx_pages_vaddr; + int i; + + vmx_pages_vaddr = vm_vaddr_alloc( + vm, sizeof(struct vmx_page) * NUM_VMX_PAGES, 0, 0, 0); + + vmx_pages = (void *) addr_gva2hva(vm, vmx_pages_vaddr); + + for (i = 0; i < NUM_VMX_PAGES; i++) + allocate_vmx_page(&vmx_pages[i]); + + return vmx_pages_vaddr; +} + +void report(int64_t val) +{ + printf("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n", + val, val / TSC_ADJUST_VALUE, val % TSC_ADJUST_VALUE); +} + +int main(int argc, char *argv[]) +{ + vm_vaddr_t vmx_pages_vaddr; + struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1); + + if (!(entry->ecx & CPUID_VMX)) { + printf("nested VMX not enabled, skipping test"); + return 0; + } + + vm = vm_create_default_vmx(VCPU_ID, (void *) l1_guest_code); + + /* Allocate VMX pages and shared descriptors (vmx_pages). 
*/ + vmx_pages_vaddr = allocate_vmx_pages(); + vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_vaddr); + + for (;;) { + volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID); + struct kvm_regs regs; + + vcpu_run(vm, VCPU_ID); + TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, + "Got exit_reason other than KVM_EXIT_IO: %u (%s),\n", + run->exit_reason, + exit_reason_str(run->exit_reason)); + + vcpu_regs_get(vm, VCPU_ID, &regs); + + switch (run->io.port) { + case PORT_ABORT: + TEST_ASSERT(false, "%s", (const char *) regs.rdi); + /* NOT REACHED */ + case PORT_REPORT: + report(regs.rdi); + break; + case PORT_DONE: + goto done; + default: + TEST_ASSERT(false, "Unknown port 0x%x.", run->io.port); + } + } + + kvm_vm_free(vm); +done: + return 0; +} diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk index 195e9d4739a9..c1b1a4dc6a96 100644 --- a/tools/testing/selftests/lib.mk +++ b/tools/testing/selftests/lib.mk @@ -20,10 +20,10 @@ all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) .ONESHELL: define RUN_TESTS - @export KSFT_TAP_LEVEL=`echo 1`; - @test_num=`echo 0`; - @echo "TAP version 13"; - @for TEST in $(1); do \ + @export KSFT_TAP_LEVEL=`echo 1`; \ + test_num=`echo 0`; \ + echo "TAP version 13"; \ + for TEST in $(1); do \ BASENAME_TEST=`basename $$TEST`; \ test_num=`echo $$test_num+1 | bc`; \ echo "selftests: $$BASENAME_TEST"; \ diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 785fc18a16b4..3ff81a478dbe 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -6,6 +6,7 @@ CFLAGS += -I../../../../usr/include/ TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh TEST_PROGS += fib_tests.sh fib-onlink-tests.sh pmtu.sh +TEST_PROGS_EXTENDED := in_netns.sh TEST_GEN_FILES = socket TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json index 5b012f4981d4..6f289a49e5ec 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json @@ -66,7 +66,7 @@ "cmdUnderTest": "$TC action add action bpf object-file _b.o index 667", "expExitCode": "0", "verifyCmd": "$TC action get action bpf index 667", - "matchPattern": "action order [0-9]*: bpf _b.o:\\[action\\] id [0-9]* tag 3b185187f1855c4c default-action pipe.*index 667 ref", + "matchPattern": "action order [0-9]*: bpf _b.o:\\[action\\] id [0-9]* tag 3b185187f1855c4c( jited)? 
default-action pipe.*index 667 ref", "matchCount": "1", "teardown": [ "$TC action flush action bpf", @@ -92,10 +92,15 @@ "cmdUnderTest": "$TC action add action bpf object-file _c.o index 667", "expExitCode": "255", "verifyCmd": "$TC action get action bpf index 667", - "matchPattern": "action order [0-9]*: bpf _b.o:\\[action\\] id [0-9].*index 667 ref", + "matchPattern": "action order [0-9]*: bpf _c.o:\\[action\\] id [0-9].*index 667 ref", "matchCount": "0", "teardown": [ - "$TC action flush action bpf", + [ + "$TC action flush action bpf", + 0, + 1, + 255 + ], "rm -f _c.o" ] }, diff --git a/tools/testing/selftests/x86/test_syscall_vdso.c b/tools/testing/selftests/x86/test_syscall_vdso.c index 40370354d4c1..c9c3281077bc 100644 --- a/tools/testing/selftests/x86/test_syscall_vdso.c +++ b/tools/testing/selftests/x86/test_syscall_vdso.c @@ -100,12 +100,19 @@ asm ( " shl $32, %r8\n" " orq $0x7f7f7f7f, %r8\n" " movq %r8, %r9\n" - " movq %r8, %r10\n" - " movq %r8, %r11\n" - " movq %r8, %r12\n" - " movq %r8, %r13\n" - " movq %r8, %r14\n" - " movq %r8, %r15\n" + " incq %r9\n" + " movq %r9, %r10\n" + " incq %r10\n" + " movq %r10, %r11\n" + " incq %r11\n" + " movq %r11, %r12\n" + " incq %r12\n" + " movq %r12, %r13\n" + " incq %r13\n" + " movq %r13, %r14\n" + " incq %r14\n" + " movq %r14, %r15\n" + " incq %r15\n" " ret\n" " .code32\n" " .popsection\n" @@ -128,12 +135,13 @@ int check_regs64(void) int err = 0; int num = 8; uint64_t *r64 = &regs64.r8; + uint64_t expected = 0x7f7f7f7f7f7f7f7fULL; if (!kernel_is_64bit) return 0; do { - if (*r64 == 0x7f7f7f7f7f7f7f7fULL) + if (*r64 == expected++) continue; /* register did not change */ if (syscall_addr != (long)&int80) { /* @@ -147,18 +155,17 @@ int check_regs64(void) continue; } } else { - /* INT80 syscall entrypoint can be used by + /* + * INT80 syscall entrypoint can be used by * 64-bit programs too, unlike SYSCALL/SYSENTER. * Therefore it must preserve R12+ * (they are callee-saved registers in 64-bit C ABI). * - * This was probably historically not intended, - * but R8..11 are clobbered (cleared to 0). - * IOW: they are the only registers which aren't - * preserved across INT80 syscall. + * Starting in Linux 4.17 (and any kernel that + * backports the change), R8..11 are preserved. + * Historically (and probably unintentionally), they + * were clobbered or zeroed. */ - if (*r64 == 0 && num <= 11) - continue; } printf("[FAIL]\tR%d has changed:%016llx\n", num, *r64); err++; diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index dba629c5f8ac..a4c1b76240df 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -63,7 +63,7 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu); static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1); static u32 kvm_next_vmid; static unsigned int kvm_vmid_bits __read_mostly; -static DEFINE_SPINLOCK(kvm_vmid_lock); +static DEFINE_RWLOCK(kvm_vmid_lock); static bool vgic_present; @@ -473,11 +473,16 @@ static void update_vttbr(struct kvm *kvm) { phys_addr_t pgd_phys; u64 vmid; + bool new_gen; - if (!need_new_vmid_gen(kvm)) + read_lock(&kvm_vmid_lock); + new_gen = need_new_vmid_gen(kvm); + read_unlock(&kvm_vmid_lock); + + if (!new_gen) return; - spin_lock(&kvm_vmid_lock); + write_lock(&kvm_vmid_lock); /* * We need to re-check the vmid_gen here to ensure that if another vcpu @@ -485,7 +490,7 @@ static void update_vttbr(struct kvm *kvm) * use the same vmid. 
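The update_vttbr() change in this hunk turns kvm_vmid_lock into a rwlock and re-tests need_new_vmid_gen() once the write lock is held, so a vCPU that lost the race does not refresh the VMID a second time. A userspace sketch of the same check, lock, re-check shape, using pthread rwlocks and hypothetical names in place of the KVM ones:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

static pthread_rwlock_t gen_lock = PTHREAD_RWLOCK_INITIALIZER;
static uint64_t global_gen = 1;

struct vm_ctx {
	uint64_t gen;	/* stands in for kvm->arch.vmid_gen */
};

static bool needs_new_gen(const struct vm_ctx *vm)
{
	return vm->gen != global_gen;
}

static void update_gen(struct vm_ctx *vm)
{
	bool stale;

	/* Fast path: many callers can check the generation concurrently. */
	pthread_rwlock_rdlock(&gen_lock);
	stale = needs_new_gen(vm);
	pthread_rwlock_unlock(&gen_lock);
	if (!stale)
		return;

	/* Slow path: another thread may have refreshed the generation
	 * between our unlock and this wrlock, so test again. */
	pthread_rwlock_wrlock(&gen_lock);
	if (needs_new_gen(vm))
		vm->gen = global_gen;	/* stands in for assigning a fresh VMID */
	pthread_rwlock_unlock(&gen_lock);
}

int main(void)
{
	struct vm_ctx vm = { .gen = 0 };

	update_gen(&vm);
	return vm.gen == global_gen ? 0 : 1;
}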
*/ if (!need_new_vmid_gen(kvm)) { - spin_unlock(&kvm_vmid_lock); + write_unlock(&kvm_vmid_lock); return; } @@ -519,7 +524,7 @@ static void update_vttbr(struct kvm *kvm) vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits); kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid; - spin_unlock(&kvm_vmid_lock); + write_unlock(&kvm_vmid_lock); } static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c index 6919352cbf15..c4762bef13c6 100644 --- a/virt/kvm/arm/psci.c +++ b/virt/kvm/arm/psci.c @@ -18,6 +18,7 @@ #include <linux/arm-smccc.h> #include <linux/preempt.h> #include <linux/kvm_host.h> +#include <linux/uaccess.h> #include <linux/wait.h> #include <asm/cputype.h> @@ -427,3 +428,62 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu) smccc_set_retval(vcpu, val, 0, 0, 0); return 1; } + +int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu) +{ + return 1; /* PSCI version */ +} + +int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) +{ + if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices)) + return -EFAULT; + + return 0; +} + +int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ + if (reg->id == KVM_REG_ARM_PSCI_VERSION) { + void __user *uaddr = (void __user *)(long)reg->addr; + u64 val; + + val = kvm_psci_version(vcpu, vcpu->kvm); + if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id))) + return -EFAULT; + + return 0; + } + + return -EINVAL; +} + +int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ + if (reg->id == KVM_REG_ARM_PSCI_VERSION) { + void __user *uaddr = (void __user *)(long)reg->addr; + bool wants_02; + u64 val; + + if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id))) + return -EFAULT; + + wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features); + + switch (val) { + case KVM_ARM_PSCI_0_1: + if (wants_02) + return -EINVAL; + vcpu->kvm->arch.psci_version = val; + return 0; + case KVM_ARM_PSCI_0_2: + case KVM_ARM_PSCI_1_0: + if (!wants_02) + return -EINVAL; + vcpu->kvm->arch.psci_version = val; + return 0; + } + } + + return -EINVAL; +} diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index 68378fe17a0e..e07156c30323 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c @@ -423,7 +423,7 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data) * We cannot rely on the vgic maintenance interrupt to be * delivered synchronously. This means we can only use it to * exit the VM, and we perform the handling of EOIed - * interrupts on the exit path (see vgic_process_maintenance). + * interrupts on the exit path (see vgic_fold_lr_state). 
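The psci.c additions above expose the vCPU's PSCI revision as a firmware pseudo-register, so userspace can save, restore, or pin it with the standard ONE_REG ioctls. A hedged sketch of the read side; vcpu_fd is assumed to be an existing KVM vCPU file descriptor, and KVM_REG_ARM_PSCI_VERSION comes from the arm/arm64 uapi headers updated by this series:

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read the PSCI version firmware register from an already-created vCPU. */
int read_psci_version(int vcpu_fd, uint64_t *version)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM_PSCI_VERSION,
		.addr = (uint64_t)(unsigned long)version,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0) {
		perror("KVM_GET_ONE_REG");
		return -1;
	}
	return 0;
}

Writing it back uses KVM_SET_ONE_REG in the same way; as the kvm_arm_set_fw_reg() handler above shows, values that conflict with the vCPU's KVM_ARM_VCPU_PSCI_0_2 feature are rejected with -EINVAL.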
*/ return IRQ_HANDLED; } diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c index e21e2f49b005..ffc587bf4742 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v2.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c @@ -14,6 +14,8 @@ #include <linux/irqchip/arm-gic.h> #include <linux/kvm.h> #include <linux/kvm_host.h> +#include <linux/nospec.h> + #include <kvm/iodev.h> #include <kvm/arm_vgic.h> @@ -324,6 +326,9 @@ static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu, if (n > vgic_v3_max_apr_idx(vcpu)) return 0; + + n = array_index_nospec(n, 4); + /* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */ return vgicv3->vgic_ap1r[n]; } diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c index dbe99d635c80..ff9655cfeb2f 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.c +++ b/virt/kvm/arm/vgic/vgic-mmio.c @@ -289,10 +289,16 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, irq->vcpu->cpu != -1) /* VCPU thread is running */ cond_resched_lock(&irq->irq_lock); - if (irq->hw) + if (irq->hw) { vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu); - else + } else { + u32 model = vcpu->kvm->arch.vgic.vgic_model; + irq->active = active; + if (model == KVM_DEV_TYPE_ARM_VGIC_V2 && + active && vgic_irq_is_sgi(irq->intid)) + irq->active_source = requester_vcpu->vcpu_id; + } if (irq->active) vgic_queue_irq_unlock(vcpu->kvm, irq, flags); diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c index 45aa433f018f..a5f2e44f1c33 100644 --- a/virt/kvm/arm/vgic/vgic-v2.c +++ b/virt/kvm/arm/vgic/vgic-v2.c @@ -37,13 +37,6 @@ void vgic_v2_init_lrs(void) vgic_v2_write_lr(i, 0); } -void vgic_v2_set_npie(struct kvm_vcpu *vcpu) -{ - struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2; - - cpuif->vgic_hcr |= GICH_HCR_NPIE; -} - void vgic_v2_set_underflow(struct kvm_vcpu *vcpu) { struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2; @@ -71,13 +64,18 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu) int lr; unsigned long flags; - cpuif->vgic_hcr &= ~(GICH_HCR_UIE | GICH_HCR_NPIE); + cpuif->vgic_hcr &= ~GICH_HCR_UIE; for (lr = 0; lr < vgic_cpu->used_lrs; lr++) { u32 val = cpuif->vgic_lr[lr]; - u32 intid = val & GICH_LR_VIRTUALID; + u32 cpuid, intid = val & GICH_LR_VIRTUALID; struct vgic_irq *irq; + /* Extract the source vCPU id from the LR */ + cpuid = val & GICH_LR_PHYSID_CPUID; + cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT; + cpuid &= 7; + /* Notify fds when the guest EOI'ed a level-triggered SPI */ if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid)) kvm_notify_acked_irq(vcpu->kvm, 0, @@ -90,17 +88,16 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu) /* Always preserve the active bit */ irq->active = !!(val & GICH_LR_ACTIVE_BIT); + if (irq->active && vgic_irq_is_sgi(intid)) + irq->active_source = cpuid; + /* Edge is the only case where we preserve the pending bit */ if (irq->config == VGIC_CONFIG_EDGE && (val & GICH_LR_PENDING_BIT)) { irq->pending_latch = true; - if (vgic_irq_is_sgi(intid)) { - u32 cpuid = val & GICH_LR_PHYSID_CPUID; - - cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT; + if (vgic_irq_is_sgi(intid)) irq->source |= (1 << cpuid); - } } /* @@ -152,8 +149,15 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr) u32 val = irq->intid; bool allow_pending = true; - if (irq->active) + if (irq->active) { val |= GICH_LR_ACTIVE_BIT; + if (vgic_irq_is_sgi(irq->intid)) + val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT; + if (vgic_irq_is_multi_sgi(irq)) { + allow_pending = 
false; + val |= GICH_LR_EOI; + } + } if (irq->hw) { val |= GICH_LR_HW; @@ -190,8 +194,10 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr) BUG_ON(!src); val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT; irq->source &= ~(1 << (src - 1)); - if (irq->source) + if (irq->source) { irq->pending_latch = true; + val |= GICH_LR_EOI; + } } } diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index 8195f52ae6f0..c7423f3768e5 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c @@ -27,13 +27,6 @@ static bool group1_trap; static bool common_trap; static bool gicv4_enable; -void vgic_v3_set_npie(struct kvm_vcpu *vcpu) -{ - struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3; - - cpuif->vgic_hcr |= ICH_HCR_NPIE; -} - void vgic_v3_set_underflow(struct kvm_vcpu *vcpu) { struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3; @@ -55,17 +48,23 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu) int lr; unsigned long flags; - cpuif->vgic_hcr &= ~(ICH_HCR_UIE | ICH_HCR_NPIE); + cpuif->vgic_hcr &= ~ICH_HCR_UIE; for (lr = 0; lr < vgic_cpu->used_lrs; lr++) { u64 val = cpuif->vgic_lr[lr]; - u32 intid; + u32 intid, cpuid; struct vgic_irq *irq; + bool is_v2_sgi = false; - if (model == KVM_DEV_TYPE_ARM_VGIC_V3) + cpuid = val & GICH_LR_PHYSID_CPUID; + cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT; + + if (model == KVM_DEV_TYPE_ARM_VGIC_V3) { intid = val & ICH_LR_VIRTUAL_ID_MASK; - else + } else { intid = val & GICH_LR_VIRTUALID; + is_v2_sgi = vgic_irq_is_sgi(intid); + } /* Notify fds when the guest EOI'ed a level-triggered IRQ */ if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid)) @@ -81,18 +80,16 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu) /* Always preserve the active bit */ irq->active = !!(val & ICH_LR_ACTIVE_BIT); + if (irq->active && is_v2_sgi) + irq->active_source = cpuid; + /* Edge is the only case where we preserve the pending bit */ if (irq->config == VGIC_CONFIG_EDGE && (val & ICH_LR_PENDING_BIT)) { irq->pending_latch = true; - if (vgic_irq_is_sgi(intid) && - model == KVM_DEV_TYPE_ARM_VGIC_V2) { - u32 cpuid = val & GICH_LR_PHYSID_CPUID; - - cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT; + if (is_v2_sgi) irq->source |= (1 << cpuid); - } } /* @@ -133,10 +130,20 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr) { u32 model = vcpu->kvm->arch.vgic.vgic_model; u64 val = irq->intid; - bool allow_pending = true; + bool allow_pending = true, is_v2_sgi; - if (irq->active) + is_v2_sgi = (vgic_irq_is_sgi(irq->intid) && + model == KVM_DEV_TYPE_ARM_VGIC_V2); + + if (irq->active) { val |= ICH_LR_ACTIVE_BIT; + if (is_v2_sgi) + val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT; + if (vgic_irq_is_multi_sgi(irq)) { + allow_pending = false; + val |= ICH_LR_EOI; + } + } if (irq->hw) { val |= ICH_LR_HW; @@ -174,8 +181,10 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr) BUG_ON(!src); val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT; irq->source &= ~(1 << (src - 1)); - if (irq->source) + if (irq->source) { irq->pending_latch = true; + val |= ICH_LR_EOI; + } } } diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c index e74baec76361..97bfba8d9a59 100644 --- a/virt/kvm/arm/vgic/vgic.c +++ b/virt/kvm/arm/vgic/vgic.c @@ -14,11 +14,13 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. 
*/ +#include <linux/interrupt.h> +#include <linux/irq.h> #include <linux/kvm.h> #include <linux/kvm_host.h> #include <linux/list_sort.h> -#include <linux/interrupt.h> -#include <linux/irq.h> +#include <linux/nospec.h> + #include <asm/kvm_hyp.h> #include "vgic.h" @@ -101,12 +103,16 @@ struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 intid) { /* SGIs and PPIs */ - if (intid <= VGIC_MAX_PRIVATE) + if (intid <= VGIC_MAX_PRIVATE) { + intid = array_index_nospec(intid, VGIC_MAX_PRIVATE); return &vcpu->arch.vgic_cpu.private_irqs[intid]; + } /* SPIs */ - if (intid <= VGIC_MAX_SPI) + if (intid <= VGIC_MAX_SPI) { + intid = array_index_nospec(intid, VGIC_MAX_SPI); return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS]; + } /* LPIs */ if (intid >= VGIC_MIN_LPI) @@ -594,6 +600,7 @@ retry: list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB; + bool target_vcpu_needs_kick = false; spin_lock(&irq->irq_lock); @@ -664,11 +671,18 @@ retry: list_del(&irq->ap_list); irq->vcpu = target_vcpu; list_add_tail(&irq->ap_list, &new_cpu->ap_list_head); + target_vcpu_needs_kick = true; } spin_unlock(&irq->irq_lock); spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock); spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags); + + if (target_vcpu_needs_kick) { + kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu); + kvm_vcpu_kick(target_vcpu); + } + goto retry; } @@ -711,14 +725,6 @@ static inline void vgic_set_underflow(struct kvm_vcpu *vcpu) vgic_v3_set_underflow(vcpu); } -static inline void vgic_set_npie(struct kvm_vcpu *vcpu) -{ - if (kvm_vgic_global_state.type == VGIC_V2) - vgic_v2_set_npie(vcpu); - else - vgic_v3_set_npie(vcpu); -} - /* Requires the ap_list_lock to be held. */ static int compute_ap_list_depth(struct kvm_vcpu *vcpu, bool *multi_sgi) @@ -732,17 +738,15 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu, DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock)); list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { + int w; + spin_lock(&irq->irq_lock); /* GICv2 SGIs can count for more than one... 
*/ - if (vgic_irq_is_sgi(irq->intid) && irq->source) { - int w = hweight8(irq->source); - - count += w; - *multi_sgi |= (w > 1); - } else { - count++; - } + w = vgic_irq_get_lr_count(irq); spin_unlock(&irq->irq_lock); + + count += w; + *multi_sgi |= (w > 1); } return count; } @@ -753,7 +757,6 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu) struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; struct vgic_irq *irq; int count; - bool npie = false; bool multi_sgi; u8 prio = 0xff; @@ -783,10 +786,8 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu) if (likely(vgic_target_oracle(irq) == vcpu)) { vgic_populate_lr(vcpu, irq, count++); - if (irq->source) { - npie = true; + if (irq->source) prio = irq->priority; - } } spin_unlock(&irq->irq_lock); @@ -799,9 +800,6 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu) } } - if (npie) - vgic_set_npie(vcpu); - vcpu->arch.vgic_cpu.used_lrs = count; /* Nuke remaining LRs */ diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index 830e815748a0..32c25d42c93f 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -110,6 +110,20 @@ static inline bool vgic_irq_is_mapped_level(struct vgic_irq *irq) return irq->config == VGIC_CONFIG_LEVEL && irq->hw; } +static inline int vgic_irq_get_lr_count(struct vgic_irq *irq) +{ + /* Account for the active state as an interrupt */ + if (vgic_irq_is_sgi(irq->intid) && irq->source) + return hweight8(irq->source) + irq->active; + + return irq_is_pending(irq) || irq->active; +} + +static inline bool vgic_irq_is_multi_sgi(struct vgic_irq *irq) +{ + return vgic_irq_get_lr_count(irq) > 1; +} + /* * This struct provides an intermediate representation of the fields contained * in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC |
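The vgic.h helpers above centralise the list-register accounting now shared by vgic-v2.c and vgic-v3.c: a GICv2 SGI needs one LR per pending source CPU plus one more if it is active, and any interrupt that needs more than one LR is treated as a multi-SGI (which is why the populate_lr paths set the EOI maintenance bit and defer the remaining sources). The standalone sketch below, with a toy struct standing in for struct vgic_irq, works that arithmetic through for an SGI that is pending from two CPUs while also active:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for the few struct vgic_irq fields the calculation uses. */
struct toy_irq {
	uint32_t intid;		/* GICv2 SGIs are INTIDs 0..15 */
	uint8_t source;		/* bitmap of pending source CPUs (SGIs only) */
	bool pending;
	bool active;
};

static bool is_sgi(uint32_t intid)
{
	return intid < 16;
}

static int lr_count(const struct toy_irq *irq)
{
	/* One LR per pending source, plus one for the active state. */
	if (is_sgi(irq->intid) && irq->source)
		return __builtin_popcount(irq->source) + irq->active;

	/* Everything else needs at most one LR. */
	return irq->pending || irq->active;
}

int main(void)
{
	/* SGI 1, pending from CPUs 0 and 2, and currently active. */
	struct toy_irq sgi = { .intid = 1, .source = 0x05, .active = true };
	int n = lr_count(&sgi);

	printf("LRs needed: %d (multi-SGI: %s)\n", n, n > 1 ? "yes" : "no");
	return 0;
}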