| field | value | date |
|---|---|---|
| author | David S. Miller <davem@davemloft.net> | 2014-02-19 07:24:22 +0100 |
| committer | David S. Miller <davem@davemloft.net> | 2014-02-19 07:24:22 +0100 |
| commit | 1e8d6421cff2c24fe0b345711e7a21af02e8bcf5 (patch) | |
| tree | 773b30106efb9b48055bc93958e5a94ac53768ce | |
| parent | rtnl: make ifla_policy static (diff) | |
| parent | Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
drivers/net/bonding/bond_3ad.h
drivers/net/bonding/bond_main.c
Two minor conflicts in bonding, both of which were overlapping
changes.
Signed-off-by: David S. Miller <davem@davemloft.net>
540 files changed, 5378 insertions, 3558 deletions
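The conflict resolution itself is folded into the merge commit above. As a rough sketch only (not taken from the commit, just the usual git workflow), redoing a merge like this against the davem/net tree named in the subject line would look something like:

```sh
# Hypothetical sketch; the remote URL and conflicting paths are taken from the
# commit message above, the commands are the standard conflicted-merge workflow.
git pull git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
# git stops and reports the two overlapping-change conflicts noted above:
#   drivers/net/bonding/bond_3ad.h
#   drivers/net/bonding/bond_main.c
# Edit both files to combine the overlapping changes, then conclude the merge:
git add drivers/net/bonding/bond_3ad.h drivers/net/bonding/bond_main.c
git commit -s
```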
diff --git a/Documentation/ABI/testing/sysfs-tty b/Documentation/ABI/testing/sysfs-tty index ad22fb0ee765..a2ccec35ffce 100644 --- a/Documentation/ABI/testing/sysfs-tty +++ b/Documentation/ABI/testing/sysfs-tty @@ -3,7 +3,8 @@ Date: Nov 2010 Contact: Kay Sievers <kay.sievers@vrfy.org> Description: Shows the list of currently configured - console devices, like 'tty1 ttyS0'. + tty devices used for the console, + like 'tty1 ttyS0'. The last entry in the file is the active device connected to /dev/console. The file supports poll() to detect virtual diff --git a/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt b/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt index 0a85c70cd30a..07ad02075a93 100644 --- a/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt +++ b/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt @@ -13,6 +13,9 @@ Required properties: - #address-cells: should be one. The cell is the slot id. - #size-cells: should be zero. - at least one slot node +- clock-names: tuple listing input clock names. + Required elements: "mci_clk" +- clocks: phandles to input clocks. The node contains child nodes for each slot that the platform uses @@ -24,6 +27,8 @@ mmc0: mmc@f0008000 { interrupts = <12 4>; #address-cells = <1>; #size-cells = <0>; + clock-names = "mci_clk"; + clocks = <&mci0_clk>; [ child node definitions...] }; diff --git a/Documentation/devicetree/bindings/net/sti-dwmac.txt b/Documentation/devicetree/bindings/net/sti-dwmac.txt new file mode 100644 index 000000000000..3dd3d0bf112f --- /dev/null +++ b/Documentation/devicetree/bindings/net/sti-dwmac.txt @@ -0,0 +1,58 @@ +STMicroelectronics SoC DWMAC glue layer controller + +The device node has following properties. + +Required properties: + - compatible : Can be "st,stih415-dwmac", "st,stih416-dwmac" or + "st,stid127-dwmac". + - reg : Offset of the glue configuration register map in system + configuration regmap pointed by st,syscon property and size. + + - reg-names : Should be "sti-ethconf". + + - st,syscon : Should be phandle to system configuration node which + encompases this glue registers. + + - st,tx-retime-src: On STi Parts for Giga bit speeds, 125Mhz clocks can be + wired up in from different sources. One via TXCLK pin and other via CLK_125 + pin. This wiring is totally board dependent. However the retiming glue + logic should be configured accordingly. Possible values for this property + + "txclk" - if 125Mhz clock is wired up via txclk line. + "clk_125" - if 125Mhz clock is wired up via clk_125 line. + + This property is only valid for Giga bit setup( GMII, RGMII), and it is + un-used for non-giga bit (MII and RMII) setups. Also note that internal + clockgen can not generate stable 125Mhz clock. + + - st,ext-phyclk: This boolean property indicates who is generating the clock + for tx and rx. This property is only valid for RMII case where the clock can + be generated from the MAC or PHY. + + - clock-names: should be "sti-ethclk". + - clocks: Should point to ethernet clockgen which can generate phyclk. 
+ + +Example: + +ethernet0: dwmac@fe810000 { + device_type = "network"; + compatible = "st,stih416-dwmac", "snps,dwmac", "snps,dwmac-3.710"; + reg = <0xfe810000 0x8000>, <0x8bc 0x4>; + reg-names = "stmmaceth", "sti-ethconf"; + interrupts = <0 133 0>, <0 134 0>, <0 135 0>; + interrupt-names = "macirq", "eth_wake_irq", "eth_lpi"; + phy-mode = "mii"; + + st,syscon = <&syscfg_rear>; + + snps,pbl = <32>; + snps,mixed-burst; + + resets = <&softreset STIH416_ETH0_SOFTRESET>; + reset-names = "stmmaceth"; + pinctrl-0 = <&pinctrl_mii0>; + pinctrl-names = "default"; + clocks = <&CLK_S_GMAC0_PHY>; + clock-names = "stmmaceth"; +}; diff --git a/Documentation/devicetree/bindings/power/bq2415x.txt b/Documentation/devicetree/bindings/power/bq2415x.txt new file mode 100644 index 000000000000..d0327f0b59ad --- /dev/null +++ b/Documentation/devicetree/bindings/power/bq2415x.txt @@ -0,0 +1,47 @@ +Binding for TI bq2415x Li-Ion Charger + +Required properties: +- compatible: Should contain one of the following: + * "ti,bq24150" + * "ti,bq24150" + * "ti,bq24150a" + * "ti,bq24151" + * "ti,bq24151a" + * "ti,bq24152" + * "ti,bq24153" + * "ti,bq24153a" + * "ti,bq24155" + * "ti,bq24156" + * "ti,bq24156a" + * "ti,bq24158" +- reg: integer, i2c address of the device. +- ti,current-limit: integer, initial maximum current charger can pull + from power supply in mA. +- ti,weak-battery-voltage: integer, weak battery voltage threshold in mV. + The chip will use slow precharge if battery voltage + is below this value. +- ti,battery-regulation-voltage: integer, maximum charging voltage in mV. +- ti,charge-current: integer, maximum charging current in mA. +- ti,termination-current: integer, charge will be terminated when current in + constant-voltage phase drops below this value (in mA). +- ti,resistor-sense: integer, value of sensing resistor in milliohm. + +Optional properties: +- ti,usb-charger-detection: phandle to usb charger detection device. + (required for auto mode) + +Example from Nokia N900: + +bq24150a { + compatible = "ti,bq24150a"; + reg = <0x6b>; + + ti,current-limit = <100>; + ti,weak-battery-voltage = <3400>; + ti,battery-regulation-voltage = <4200>; + ti,charge-current = <650>; + ti,termination-current = <100>; + ti,resistor-sense = <68>; + + ti,usb-charger-detection = <&isp1704>; +}; diff --git a/Documentation/devicetree/bindings/spi/spi_atmel.txt b/Documentation/devicetree/bindings/spi/spi_atmel.txt index 07e04cdc0c9e..4f8184d069cb 100644 --- a/Documentation/devicetree/bindings/spi/spi_atmel.txt +++ b/Documentation/devicetree/bindings/spi/spi_atmel.txt @@ -5,6 +5,9 @@ Required properties: - reg: Address and length of the register set for the device - interrupts: Should contain spi interrupt - cs-gpios: chipselects +- clock-names: tuple listing input clock names. + Required elements: "spi_clk" +- clocks: phandles to input clocks. Example: @@ -14,6 +17,8 @@ spi1: spi@fffcc000 { interrupts = <13 4 5>; #address-cells = <1>; #size-cells = <0>; + clocks = <&spi1_clk>; + clock-names = "spi_clk"; cs-gpios = <&pioB 3 0>; status = "okay"; diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index 3f900cd51bf0..40ce2df0e0e9 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -8,6 +8,7 @@ ad Avionic Design GmbH adi Analog Devices, Inc. aeroflexgaisler Aeroflex Gaisler AB ak Asahi Kasei Corp. +allwinner Allwinner Technology Co., Ltd. altr Altera Corp. 
amcc Applied Micro Circuits Corporation (APM, formally AMCC) amstaos AMS-Taos Inc. @@ -40,6 +41,7 @@ gmt Global Mixed-mode Technology, Inc. gumstix Gumstix, Inc. haoyu Haoyu Microelectronic Co. Ltd. hisilicon Hisilicon Limited. +honeywell Honeywell hp Hewlett Packard ibm International Business Machines (IBM) idt Integrated Device Technologies, Inc. @@ -55,6 +57,7 @@ maxim Maxim Integrated Products microchip Microchip Technology Inc. mosaixtech Mosaix Technologies, Inc. national National Semiconductor +neonode Neonode Inc. nintendo Nintendo nvidia NVIDIA nxp NXP Semiconductors @@ -64,7 +67,7 @@ phytec PHYTEC Messtechnik GmbH picochip Picochip Ltd powervr PowerVR (deprecated, use img) qca Qualcomm Atheros, Inc. -qcom Qualcomm, Inc. +qcom Qualcomm Technologies, Inc ralink Mediatek/Ralink Technology Corp. ramtron Ramtron International realtek Realtek Semiconductor Corp. @@ -78,6 +81,7 @@ silabs Silicon Laboratories simtek sirf SiRF Technology, Inc. snps Synopsys, Inc. +spansion Spansion Inc. st STMicroelectronics ste ST-Ericsson stericsson ST-Ericsson diff --git a/Documentation/i2c/instantiating-devices b/Documentation/i2c/instantiating-devices index c70e7a7638d1..0d85ac1935b7 100644 --- a/Documentation/i2c/instantiating-devices +++ b/Documentation/i2c/instantiating-devices @@ -8,8 +8,8 @@ reason, the kernel code must instantiate I2C devices explicitly. There are several ways to achieve this, depending on the context and requirements. -Method 1: Declare the I2C devices by bus number ------------------------------------------------ +Method 1a: Declare the I2C devices by bus number +------------------------------------------------ This method is appropriate when the I2C bus is a system bus as is the case for many embedded systems. On such systems, each I2C bus has a number @@ -51,6 +51,43 @@ The devices will be automatically unbound and destroyed when the I2C bus they sit on goes away (if ever.) +Method 1b: Declare the I2C devices via devicetree +------------------------------------------------- + +This method has the same implications as method 1a. The declaration of I2C +devices is here done via devicetree as subnodes of the master controller. + +Example: + + i2c1: i2c@400a0000 { + /* ... master properties skipped ... */ + clock-frequency = <100000>; + + flash@50 { + compatible = "atmel,24c256"; + reg = <0x50>; + }; + + pca9532: gpio@60 { + compatible = "nxp,pca9532"; + gpio-controller; + #gpio-cells = <2>; + reg = <0x60>; + }; + }; + +Here, two devices are attached to the bus using a speed of 100kHz. For +additional properties which might be needed to set up the device, please refer +to its devicetree documentation in Documentation/devicetree/bindings/. + + +Method 1c: Declare the I2C devices via ACPI +------------------------------------------- + +ACPI can also describe I2C devices. There is special documentation for this +which is currently located at Documentation/acpi/enumeration.txt. + + Method 2: Instantiate the devices explicitly -------------------------------------------- diff --git a/Documentation/networking/3c505.txt b/Documentation/networking/3c505.txt deleted file mode 100644 index 72f38b13101d..000000000000 --- a/Documentation/networking/3c505.txt +++ /dev/null @@ -1,45 +0,0 @@ -The 3Com Etherlink Plus (3c505) driver. - -This driver now uses DMA. There is currently no support for PIO operation. -The default DMA channel is 6; this is _not_ autoprobed, so you must -make sure you configure it correctly. 
If loading the driver as a -module, you can do this with "modprobe 3c505 dma=n". If the driver is -linked statically into the kernel, you must either use an "ether=" -statement on the command line, or change the definition of ELP_DMA in 3c505.h. - -The driver will warn you if it has to fall back on the compiled in -default DMA channel. - -If no base address is given at boot time, the driver will autoprobe -ports 0x300, 0x280 and 0x310 (in that order). If no IRQ is given, the driver -will try to probe for it. - -The driver can be used as a loadable module. - -Theoretically, one instance of the driver can now run multiple cards, -in the standard way (when loading a module, say "modprobe 3c505 -io=0x300,0x340 irq=10,11 dma=6,7" or whatever). I have not tested -this, though. - -The driver may now support revision 2 hardware; the dependency on -being able to read the host control register has been removed. This -is also untested, since I don't have a suitable card. - -Known problems: - I still see "DMA upload timed out" messages from time to time. These -seem to be fairly non-fatal though. - The card is old and slow. - -To do: - Improve probe/setup code - Test multicast and promiscuous operation - -Authors: - The driver is mainly written by Craig Southeren, email - <craigs@ineluki.apana.org.au>. - Parts of the driver (adapting the driver to 1.1.4+ kernels, - IRQ/address detection, some changes) and this README by - Juha Laiho <jlaiho@ichaos.nullnet.fi>. - DMA mode, more fixes, etc, by Philip Blundell <pjb27@cam.ac.uk> - Multicard support, Software configurable DMA, etc., by - Christopher Collins <ccollins@pcug.org.au> diff --git a/Documentation/phy.txt b/Documentation/phy.txt index 0103e4b15b0e..ebff6ee52441 100644 --- a/Documentation/phy.txt +++ b/Documentation/phy.txt @@ -75,14 +75,26 @@ Before the controller can make use of the PHY, it has to get a reference to it. This framework provides the following APIs to get a reference to the PHY. struct phy *phy_get(struct device *dev, const char *string); +struct phy *phy_optional_get(struct device *dev, const char *string); struct phy *devm_phy_get(struct device *dev, const char *string); - -phy_get and devm_phy_get can be used to get the PHY. In the case of dt boot, -the string arguments should contain the phy name as given in the dt data and -in the case of non-dt boot, it should contain the label of the PHY. -The only difference between the two APIs is that devm_phy_get associates the -device with the PHY using devres on successful PHY get. On driver detach, -release function is invoked on the the devres data and devres data is freed. +struct phy *devm_phy_optional_get(struct device *dev, const char *string); + +phy_get, phy_optional_get, devm_phy_get and devm_phy_optional_get can +be used to get the PHY. In the case of dt boot, the string arguments +should contain the phy name as given in the dt data and in the case of +non-dt boot, it should contain the label of the PHY. The two +devm_phy_get associates the device with the PHY using devres on +successful PHY get. On driver detach, release function is invoked on +the the devres data and devres data is freed. phy_optional_get and +devm_phy_optional_get should be used when the phy is optional. These +two functions will never return -ENODEV, but instead returns NULL when +the phy cannot be found. + +It should be noted that NULL is a valid phy reference. All phy +consumer calls on the NULL phy become NOPs. 
That is the release calls, +the phy_init() and phy_exit() calls, and phy_power_on() and +phy_power_off() calls are all NOP when applied to a NULL phy. The NULL +phy is useful in devices for handling optional phy devices. 5. Releasing a reference to the PHY diff --git a/Documentation/spi/spi-summary b/Documentation/spi/spi-summary index f72e0d1e0da8..7982bcc4d151 100644 --- a/Documentation/spi/spi-summary +++ b/Documentation/spi/spi-summary @@ -543,7 +543,22 @@ SPI MASTER METHODS queuing transfers that arrive in the meantime. When the driver is finished with this message, it must call spi_finalize_current_message() so the subsystem can issue the next - transfer. This may sleep. + message. This may sleep. + + master->transfer_one(struct spi_master *master, struct spi_device *spi, + struct spi_transfer *transfer) + The subsystem calls the driver to transfer a single transfer while + queuing transfers that arrive in the meantime. When the driver is + finished with this transfer, it must call + spi_finalize_current_transfer() so the subsystem can issue the next + transfer. This may sleep. Note: transfer_one and transfer_one_message + are mutually exclusive; when both are set, the generic subsystem does + not call your transfer_one callback. + + Return values: + negative errno: error + 0: transfer is finished + 1: transfer is still in progress DEPRECATED METHODS diff --git a/Documentation/zh_CN/arm64/booting.txt b/Documentation/zh_CN/arm64/booting.txt index 28fa325b7461..6f6d956ac1c9 100644 --- a/Documentation/zh_CN/arm64/booting.txt +++ b/Documentation/zh_CN/arm64/booting.txt @@ -7,7 +7,7 @@ help. Contact the Chinese maintainer if this translation is outdated or if there is a problem with the translation. Maintainer: Will Deacon <will.deacon@arm.com> -Chinese maintainer: Fu Wei <tekkamanninja@gmail.com> +Chinese maintainer: Fu Wei <wefu@redhat.com> --------------------------------------------------------------------- Documentation/arm64/booting.txt çš„ä¸æ–‡ç¿»è¯‘ @@ -16,9 +16,9 @@ Documentation/arm64/booting.txt çš„ä¸æ–‡ç¿»è¯‘ 译å˜åœ¨é—®é¢˜ï¼Œè¯·è”ç³»ä¸æ–‡ç‰ˆç»´æŠ¤è€…。 英文版维护者: Will Deacon <will.deacon@arm.com> -ä¸æ–‡ç‰ˆç»´æŠ¤è€…: å‚…ç‚œ Fu Wei <tekkamanninja@gmail.com> -ä¸æ–‡ç‰ˆç¿»è¯‘者: å‚…ç‚œ Fu Wei <tekkamanninja@gmail.com> -ä¸æ–‡ç‰ˆæ ¡è¯‘者: å‚…ç‚œ Fu Wei <tekkamanninja@gmail.com> +ä¸æ–‡ç‰ˆç»´æŠ¤è€…: å‚…ç‚œ Fu Wei <wefu@redhat.com> +ä¸æ–‡ç‰ˆç¿»è¯‘者: å‚…ç‚œ Fu Wei <wefu@redhat.com> +ä¸æ–‡ç‰ˆæ ¡è¯‘者: å‚…ç‚œ Fu Wei <wefu@redhat.com> 以下为æ£æ–‡ --------------------------------------------------------------------- @@ -64,8 +64,8 @@ RAM,或å¯èƒ½ä½¿ç”¨å¯¹è¿™ä¸ªè®¾å¤‡å·²çŸ¥çš„ RAM ä¿¡æ¯ï¼Œè¿˜å¯èƒ½ä½¿ç”¨ä»»ä½• å¿…è¦æ€§: 强制 -è®¾å¤‡æ ‘æ•°æ®å—(dtb)大å°å¿…é¡»ä¸å¤§äºŽ 2 MB,且ä½äºŽä»Žå†…æ ¸æ˜ åƒèµ·å§‹ç®—起第一个 -512MB 内的 2MB è¾¹ç•Œä¸Šã€‚è¿™ä½¿å¾—å†…æ ¸å¯ä»¥é€šè¿‡åˆå§‹é¡µè¡¨ä¸çš„å•ä¸ªèŠ‚æè¿°ç¬¦æ¥ +è®¾å¤‡æ ‘æ•°æ®å—(dtb)必须 8 å—节对é½ï¼Œå¹¶ä½äºŽä»Žå†…æ ¸æ˜ åƒèµ·å§‹ç®—起第一个 512MB +内,且ä¸å¾—跨越 2MB 对é½è¾¹ç•Œã€‚è¿™ä½¿å¾—å†…æ ¸å¯ä»¥é€šè¿‡åˆå§‹é¡µè¡¨ä¸çš„å•ä¸ªèŠ‚æè¿°ç¬¦æ¥ æ˜ å°„æ¤æ•°æ®å—。 @@ -84,13 +84,23 @@ AArch64 å†…æ ¸å½“å‰æ²¡æœ‰æ供自解压代ç ï¼Œå› æ¤å¦‚果使用了压缩内 å¿…è¦æ€§: 强制 -å·²è§£åŽ‹çš„å†…æ ¸æ˜ åƒåŒ…å«ä¸€ä¸ª 32 å—节的头,内容如下: +å·²è§£åŽ‹çš„å†…æ ¸æ˜ åƒåŒ…å«ä¸€ä¸ª 64 å—节的头,内容如下: - u32 magic = 0x14000008; /* 跳转到 stext, å°ç«¯ */ - u32 res0 = 0; /* ä¿ç•™ */ + u32 code0; /* å¯æ‰§è¡Œä»£ç */ + u32 code1; /* å¯æ‰§è¡Œä»£ç */ u64 text_offset; /* æ˜ åƒè£…è½½å移 */ + u64 res0 = 0; /* ä¿ç•™ */ u64 res1 = 0; /* ä¿ç•™ */ u64 res2 = 0; /* ä¿ç•™ */ + u64 res3 = 0; /* ä¿ç•™ */ + u64 res4 = 0; /* ä¿ç•™ */ + u32 magic = 0x644d5241; /* é”æ•°, å°ç«¯, 
"ARM\x64" */ + u32 res5 = 0; /* ä¿ç•™ */ + + +æ˜ åƒå¤´æ³¨é‡Šï¼š + +- code0/code1 负责跳转到 stext. æ˜ åƒå¿…é¡»ä½äºŽç³»ç»Ÿ RAM 起始处的特定å移(当å‰æ˜¯ 0x80000)。系统 RAM 的起始地å€å¿…须是以 2MB 对é½çš„。 @@ -118,9 +128,9 @@ AArch64 å†…æ ¸å½“å‰æ²¡æœ‰æ供自解压代ç ï¼Œå› æ¤å¦‚果使用了压缩内 外部高速缓å˜ï¼ˆå¦‚æžœå˜åœ¨ï¼‰å¿…é¡»é…置并ç¦ç”¨ã€‚ - 架构计时器 - CNTFRQ 必须设定为计时器的频率。 - 如果在 EL1 模å¼ä¸‹è¿›å…¥å†…æ ¸ï¼Œåˆ™ CNTHCTL_EL2 ä¸çš„ EL1PCTEN (bit 0) - 必须置ä½ã€‚ + CNTFRQ 必须设定为计时器的频率,且 CNTVOFF 必须设定为对所有 CPU + 都一致的值。如果在 EL1 模å¼ä¸‹è¿›å…¥å†…æ ¸ï¼Œåˆ™ CNTHCTL_EL2 ä¸çš„ + EL1PCTEN (bit 0) 必须置ä½ã€‚ - 一致性 é€šè¿‡å†…æ ¸å¯åŠ¨çš„所有 CPU åœ¨å†…æ ¸å…¥å£åœ°å€ä¸Šå¿…须处于相åŒçš„一致性域ä¸ã€‚ @@ -131,23 +141,40 @@ AArch64 å†…æ ¸å½“å‰æ²¡æœ‰æ供自解压代ç ï¼Œå› æ¤å¦‚果使用了压缩内 åœ¨è¿›å…¥å†…æ ¸æ˜ åƒçš„异常级ä¸ï¼Œæ‰€æœ‰æž„架ä¸å¯å†™çš„系统寄å˜å™¨å¿…须通过软件 在一个更高的异常级别下åˆå§‹åŒ–,以防æ¢åœ¨ 未知 状æ€ä¸‹è¿è¡Œã€‚ +以上对于 CPU 模å¼ã€é«˜é€Ÿç¼“å˜ã€MMUã€æž¶æž„计时器ã€ä¸€è‡´æ€§ã€ç³»ç»Ÿå¯„å˜å™¨çš„ +å¿…è¦æ¡ä»¶æ述适用于所有 CPU。所有 CPU 必须在åŒä¸€å¼‚å¸¸çº§åˆ«è·³å…¥å†…æ ¸ã€‚ + 引导装载程åºå¿…须在æ¯ä¸ª CPU 处于以下状æ€æ—¶è·³å…¥å†…æ ¸å…¥å£ï¼š - 主 CPU å¿…é¡»ç›´æŽ¥è·³å…¥å†…æ ¸æ˜ åƒçš„第一æ¡æŒ‡ä»¤ã€‚é€šè¿‡æ¤ CPU ä¼ é€’çš„è®¾å¤‡æ ‘ - æ•°æ®å—必须在æ¯ä¸ª CPU 节点ä¸åŒ…å«ä»¥ä¸‹å†…容: - - 1ã€â€˜enable-method’属性。目å‰ï¼Œæ¤å—段支æŒçš„值仅为å—符串“spin-tableâ€ã€‚ - - 2ã€â€˜cpu-release-addrâ€™æ ‡è¯†ä¸€ä¸ª 64-bitã€åˆå§‹åŒ–为零的内å˜ä½ç½®ã€‚ + æ•°æ®å—必须在æ¯ä¸ª CPU 节点ä¸åŒ…å«ä¸€ä¸ª ‘enable-method’ 属性,所 + 支æŒçš„ enable-method 请è§ä¸‹æ–‡ã€‚ 引导装载程åºå¿…须生æˆè¿™äº›è®¾å¤‡æ ‘å±žæ€§ï¼Œå¹¶åœ¨è·³å…¥å†…æ ¸å…¥å£ä¹‹å‰å°†å…¶æ’å…¥ æ•°æ®å—。 -- 任何辅助 CPU 必须在内å˜ä¿ç•™åŒºï¼ˆé€šè¿‡è®¾å¤‡æ ‘ä¸çš„ /memreserve/ åŸŸä¼ é€’ +- enable-method 为 “spin-table†的 CPU 必须在它们的 CPU + 节点ä¸åŒ…å«ä¸€ä¸ª ‘cpu-release-addr’ å±žæ€§ã€‚è¿™ä¸ªå±žæ€§æ ‡è¯†äº†ä¸€ä¸ª + 64 ä½è‡ªç„¶å¯¹é½ä¸”åˆå§‹åŒ–为零的内å˜ä½ç½®ã€‚ + + 这些 CPU 必须在内å˜ä¿ç•™åŒºï¼ˆé€šè¿‡è®¾å¤‡æ ‘ä¸çš„ /memreserve/ åŸŸä¼ é€’ ç»™å†…æ ¸ï¼‰ä¸è‡ªæ—‹äºŽå†…æ ¸ä¹‹å¤–ï¼Œè½®è¯¢å®ƒä»¬çš„ cpu-release-addr ä½ç½®ï¼ˆå¿…é¡» 包å«åœ¨ä¿ç•™åŒºä¸ï¼‰ã€‚å¯é€šè¿‡æ’å…¥ wfe 指令æ¥é™ä½Žå¿™å¾ªçŽ¯å¼€é”€ï¼Œè€Œä¸» CPU å°† å‘出 sev 指令。当对 cpu-release-addr 所指ä½ç½®çš„读å–æ“作返回éžé›¶å€¼ - 时,CPU 必须直接跳入æ¤å€¼æ‰€æŒ‡å‘的地å€ã€‚ + 时,CPU 必须跳入æ¤å€¼æ‰€æŒ‡å‘的地å€ã€‚æ¤å€¼ä¸ºä¸€ä¸ªå•ç‹¬çš„ 64 ä½å°ç«¯å€¼ï¼Œ + å› æ¤ CPU 须在跳转å‰å°†æ‰€è¯»å–的值转æ¢ä¸ºå…¶æœ¬èº«çš„端模å¼ã€‚ + +- enable-method 为 “psci†的 CPU ä¿æŒåœ¨å†…æ ¸å¤–ï¼ˆæ¯”å¦‚ï¼Œåœ¨ + memory 节点ä¸æè¿°ä¸ºå†…æ ¸ç©ºé—´çš„å†…å˜åŒºå¤–ï¼Œæˆ–åœ¨é€šè¿‡è®¾å¤‡æ ‘ /memreserve/ + 域ä¸æè¿°ä¸ºå†…æ ¸ä¿ç•™åŒºçš„空间ä¸ï¼‰ã€‚å†…æ ¸å°†ä¼šå‘起在 ARM æ–‡æ¡£ï¼ˆç¼–å· + ARM DEN 0022A:用于 ARM 上的电æºçŠ¶æ€å调接å£ç³»ç»Ÿè½¯ä»¶ï¼‰ä¸æè¿°çš„ + CPU_ON 调用æ¥å°† CPU å¸¦å…¥å†…æ ¸ã€‚ + + *译者注:到文档翻译时,æ¤æ–‡æ¡£å·²æ›´æ–°ä¸º ARM DEN 0022B。 + + è®¾å¤‡æ ‘å¿…é¡»åŒ…å«ä¸€ä¸ª ‘psci’ 节点,请å‚考以下文档: + Documentation/devicetree/bindings/arm/psci.txt + - 辅助 CPU 通用寄å˜å™¨è®¾ç½® x0 = 0 (ä¿ç•™ï¼Œå°†æ¥å¯èƒ½ä½¿ç”¨) diff --git a/Documentation/zh_CN/arm64/memory.txt b/Documentation/zh_CN/arm64/memory.txt index a5f6283829f9..a782704c1cb5 100644 --- a/Documentation/zh_CN/arm64/memory.txt +++ b/Documentation/zh_CN/arm64/memory.txt @@ -7,7 +7,7 @@ help. Contact the Chinese maintainer if this translation is outdated or if there is a problem with the translation. 
Maintainer: Catalin Marinas <catalin.marinas@arm.com> -Chinese maintainer: Fu Wei <tekkamanninja@gmail.com> +Chinese maintainer: Fu Wei <wefu@redhat.com> --------------------------------------------------------------------- Documentation/arm64/memory.txt çš„ä¸æ–‡ç¿»è¯‘ @@ -16,9 +16,9 @@ Documentation/arm64/memory.txt çš„ä¸æ–‡ç¿»è¯‘ 译å˜åœ¨é—®é¢˜ï¼Œè¯·è”ç³»ä¸æ–‡ç‰ˆç»´æŠ¤è€…。 英文版维护者: Catalin Marinas <catalin.marinas@arm.com> -ä¸æ–‡ç‰ˆç»´æŠ¤è€…: å‚…ç‚œ Fu Wei <tekkamanninja@gmail.com> -ä¸æ–‡ç‰ˆç¿»è¯‘者: å‚…ç‚œ Fu Wei <tekkamanninja@gmail.com> -ä¸æ–‡ç‰ˆæ ¡è¯‘者: å‚…ç‚œ Fu Wei <tekkamanninja@gmail.com> +ä¸æ–‡ç‰ˆç»´æŠ¤è€…: å‚…ç‚œ Fu Wei <wefu@redhat.com> +ä¸æ–‡ç‰ˆç¿»è¯‘者: å‚…ç‚œ Fu Wei <wefu@redhat.com> +ä¸æ–‡ç‰ˆæ ¡è¯‘者: å‚…ç‚œ Fu Wei <wefu@redhat.com> 以下为æ£æ–‡ --------------------------------------------------------------------- @@ -41,7 +41,7 @@ AArch64 Linux 使用页大å°ä¸º 4KB çš„ 3 级转æ¢è¡¨é…置,对于用户和å TTBR1 ä¸ï¼Œä¸”从ä¸å†™å…¥ TTBR0。 -AArch64 Linux 内å˜å¸ƒå±€ï¼š +AArch64 Linux 在页大å°ä¸º 4KB 时的内å˜å¸ƒå±€ï¼š èµ·å§‹åœ°å€ ç»“æŸåœ°å€ å¤§å° ç”¨é€” ----------------------------------------------------------------------- @@ -55,15 +55,42 @@ ffffffbc00000000 ffffffbdffffffff 8GB vmemmap ffffffbe00000000 ffffffbffbbfffff ~8GB [防护页,未æ¥ç”¨äºŽ vmmemap] +ffffffbffbc00000 ffffffbffbdfffff 2MB earlyprintk 设备 + ffffffbffbe00000 ffffffbffbe0ffff 64KB PCI I/O 空间 -ffffffbbffff0000 ffffffbcffffffff ~2MB [防护页] +ffffffbffbe10000 ffffffbcffffffff ~2MB [防护页] ffffffbffc000000 ffffffbfffffffff 64MB æ¨¡å— ffffffc000000000 ffffffffffffffff 256GB å†…æ ¸é€»è¾‘å†…å˜æ˜ å°„ +AArch64 Linux 在页大å°ä¸º 64KB 时的内å˜å¸ƒå±€ï¼š + +èµ·å§‹åœ°å€ ç»“æŸåœ°å€ å¤§å° ç”¨é€” +----------------------------------------------------------------------- +0000000000000000 000003ffffffffff 4TB 用户空间 + +fffffc0000000000 fffffdfbfffeffff ~2TB vmalloc + +fffffdfbffff0000 fffffdfbffffffff 64KB [防护页] + +fffffdfc00000000 fffffdfdffffffff 8GB vmemmap + +fffffdfe00000000 fffffdfffbbfffff ~8GB [防护页,未æ¥ç”¨äºŽ vmmemap] + +fffffdfffbc00000 fffffdfffbdfffff 2MB earlyprintk 设备 + +fffffdfffbe00000 fffffdfffbe0ffff 64KB PCI I/O 空间 + +fffffdfffbe10000 fffffdfffbffffff ~2MB [防护页] + +fffffdfffc000000 fffffdffffffffff 64MB æ¨¡å— + +fffffe0000000000 ffffffffffffffff 2TB å†…æ ¸é€»è¾‘å†…å˜æ˜ å°„ + + 4KB 页大å°çš„转æ¢è¡¨æŸ¥æ‰¾ï¼š +--------+--------+--------+--------+--------+--------+--------+--------+ @@ -91,3 +118,10 @@ ffffffc000000000 ffffffffffffffff 256GB å†…æ ¸é€»è¾‘å†…å˜æ˜ å°„ | | +--------------------------> [41:29] L2 索引 (仅使用 38:29 ) | +-------------------------------> [47:42] L1 索引 (未使用) +-------------------------------------------------> [63] TTBR0/1 + +当使用 KVM æ—¶, 管ç†ç¨‹åºï¼ˆhypervisor)在 EL2 ä¸é€šè¿‡ç›¸å¯¹å†…æ ¸è™šæ‹Ÿåœ°å€çš„ +一个固定å移æ¥æ˜ å°„å†…æ ¸é¡µï¼ˆå†…æ ¸è™šæ‹Ÿåœ°å€çš„高 24 ä½è®¾ä¸ºé›¶ï¼‰: + +èµ·å§‹åœ°å€ ç»“æŸåœ°å€ å¤§å° ç”¨é€” +----------------------------------------------------------------------- +0000004000000000 0000007fffffffff 256GB 在 HYP ä¸æ˜ å°„çš„å†…æ ¸å¯¹è±¡ diff --git a/Documentation/zh_CN/arm64/tagged-pointers.txt b/Documentation/zh_CN/arm64/tagged-pointers.txt new file mode 100644 index 000000000000..2664d1bd5a1c --- /dev/null +++ b/Documentation/zh_CN/arm64/tagged-pointers.txt @@ -0,0 +1,52 @@ +Chinese translated version of Documentation/arm64/tagged-pointers.txt + +If you have any comment or update to the content, please contact the +original document maintainer directly. However, if you have a problem +communicating in English you can also ask the Chinese maintainer for +help. 
Contact the Chinese maintainer if this translation is outdated +or if there is a problem with the translation. + +Maintainer: Will Deacon <will.deacon@arm.com> +Chinese maintainer: Fu Wei <wefu@redhat.com> +--------------------------------------------------------------------- +Documentation/arm64/tagged-pointers.txt çš„ä¸æ–‡ç¿»è¯‘ + +如果想评论或更新本文的内容,请直接è”ç³»åŽŸæ–‡æ¡£çš„ç»´æŠ¤è€…ã€‚å¦‚æžœä½ ä½¿ç”¨è‹±æ–‡ +交æµæœ‰å›°éš¾çš„è¯ï¼Œä¹Ÿå¯ä»¥å‘ä¸æ–‡ç‰ˆç»´æŠ¤è€…求助。如果本翻译更新ä¸åŠæ—¶æˆ–者翻 +译å˜åœ¨é—®é¢˜ï¼Œè¯·è”ç³»ä¸æ–‡ç‰ˆç»´æŠ¤è€…。 + +英文版维护者: Will Deacon <will.deacon@arm.com> +ä¸æ–‡ç‰ˆç»´æŠ¤è€…: å‚…ç‚œ Fu Wei <wefu@redhat.com> +ä¸æ–‡ç‰ˆç¿»è¯‘者: å‚…ç‚œ Fu Wei <wefu@redhat.com> +ä¸æ–‡ç‰ˆæ ¡è¯‘者: å‚…ç‚œ Fu Wei <wefu@redhat.com> + +以下为æ£æ–‡ +--------------------------------------------------------------------- + Linux 在 AArch64 ä¸å¸¦æ ‡è®°çš„è™šæ‹Ÿåœ°å€ + ================================= + +作者: Will Deacon <will.deacon@arm.com> +日期: 2013 å¹´ 06 月 12 æ—¥ + +本文档简述了在 AArch64 地å€è½¬æ¢ç³»ç»Ÿä¸æä¾›çš„å¸¦æ ‡è®°çš„è™šæ‹Ÿåœ°å€åŠå…¶åœ¨ +AArch64 Linux ä¸çš„潜在用途。 + +å†…æ ¸æ供的地å€è½¬æ¢è¡¨é…置使通过 TTBR0 完æˆçš„虚拟地å€è½¬æ¢ï¼ˆå³ç”¨æˆ·ç©ºé—´ +æ˜ å°„ï¼‰ï¼Œå…¶è™šæ‹Ÿåœ°å€çš„最高 8 ä½ï¼ˆ63:56)会被转æ¢ç¡¬ä»¶æ‰€å¿½ç•¥ã€‚è¿™ç§æœºåˆ¶ +让这些ä½å¯ä¾›åº”用程åºè‡ªç”±ä½¿ç”¨ï¼Œå…¶æ³¨æ„事项如下: + + (1) å†…æ ¸è¦æ±‚æ‰€æœ‰ä¼ é€’åˆ° EL1 的用户空间地å€å¸¦æœ‰ 0x00 æ ‡è®°ã€‚ + è¿™æ„味ç€ä»»ä½•æºå¸¦ç”¨æˆ·ç©ºé—´è™šæ‹Ÿåœ°å€çš„系统调用(syscall) + å‚æ•° *å¿…é¡»* åœ¨é™·å…¥å†…æ ¸å‰ä½¿å®ƒä»¬çš„最高å—节被清零。 + + (2) éžé›¶æ ‡è®°åœ¨ä¼ 递信å·æ—¶ä¸è¢«ä¿å˜ã€‚è¿™æ„味ç€åœ¨åº”用程åºä¸åˆ©ç”¨äº† + æ ‡è®°çš„ä¿¡å·å¤„ç†å‡½æ•°æ— 法ä¾èµ– siginfo_t 的用户空间虚拟 + 地å€æ‰€æºå¸¦çš„包å«å…¶å†…部域信æ¯çš„æ ‡è®°ã€‚æ¤è§„则的一个例外是 + 当信å·æ˜¯åœ¨è°ƒè¯•è§‚察点的异常处ç†ç¨‹åºä¸äº§ç”Ÿçš„,æ¤æ—¶æ ‡è®°çš„ + ä¿¡æ¯å°†è¢«ä¿å˜ã€‚ + + (3) å½“ä½¿ç”¨å¸¦æ ‡è®°çš„æŒ‡é’ˆæ—¶éœ€ç‰¹åˆ«ç•™å¿ƒï¼Œå› ä¸ºä»…å¯¹ä¸¤ä¸ªè™šæ‹Ÿåœ°å€ + 的高å—节,C 编译器很å¯èƒ½æ— 法判æ–它们是ä¸åŒçš„。 + +æ¤æž„架会阻æ¢å¯¹å¸¦æ ‡è®°çš„ PC æŒ‡é’ˆçš„åˆ©ç”¨ï¼Œå› æ¤åœ¨å¼‚常返回时,其高å—节 +将被设置æˆä¸€ä¸ªä¸º “55†的扩展符。 diff --git a/MAINTAINERS b/MAINTAINERS index 5a7b3ecba94c..db8bb0d2379e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2373,7 +2373,7 @@ F: include/linux/cpufreq.h CPU FREQUENCY DRIVERS - ARM BIG LITTLE M: Viresh Kumar <viresh.kumar@linaro.org> -M: Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com> +M: Sudeep Holla <sudeep.holla@arm.com> L: cpufreq@vger.kernel.org L: linux-pm@vger.kernel.org W: http://www.arm.com/products/processors/technologies/biglittleprocessing.php @@ -2863,7 +2863,7 @@ M: Jani Nikula <jani.nikula@linux.intel.com> L: intel-gfx@lists.freedesktop.org L: dri-devel@lists.freedesktop.org Q: http://patchwork.freedesktop.org/project/intel-gfx/ -T: git git://people.freedesktop.org/~danvet/drm-intel +T: git git://anongit.freedesktop.org/drm-intel S: Supported F: drivers/gpu/drm/i915/ F: include/drm/i915* @@ -3330,6 +3330,17 @@ S: Maintained F: include/linux/netfilter_bridge/ F: net/bridge/ +ETHERNET PHY LIBRARY +M: Florian Fainelli <f.fainelli@gmail.com> +L: netdev@vger.kernel.org +S: Maintained +F: include/linux/phy.h +F: include/linux/phy_fixed.h +F: drivers/net/phy/ +F: Documentation/networking/phy.txt +F: drivers/of/of_mdio.c +F: drivers/of/of_net.c + EXT2 FILE SYSTEM M: Jan Kara <jack@suse.cz> L: linux-ext4@vger.kernel.org @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 14 SUBLEVEL = 0 -EXTRAVERSION = -rc2 +EXTRAVERSION = -rc3 NAME = Shuffling Zombie Juror # *DOCUMENTATION* diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index b9d6a8b485e0..6d1e43d46187 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile @@ -38,6 +38,7 @@ dtb-$(CONFIG_ARCH_AT91) += 
at91sam9g35ek.dtb dtb-$(CONFIG_ARCH_AT91) += at91sam9x25ek.dtb dtb-$(CONFIG_ARCH_AT91) += at91sam9x35ek.dtb # sama5d3 +dtb-$(CONFIG_ARCH_AT91) += at91-sama5d3_xplained.dtb dtb-$(CONFIG_ARCH_AT91) += sama5d31ek.dtb dtb-$(CONFIG_ARCH_AT91) += sama5d33ek.dtb dtb-$(CONFIG_ARCH_AT91) += sama5d34ek.dtb diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts new file mode 100644 index 000000000000..ce1375595e5f --- /dev/null +++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts @@ -0,0 +1,229 @@ +/* + * at91-sama5d3_xplained.dts - Device Tree file for the SAMA5D3 Xplained board + * + * Copyright (C) 2014 Atmel, + * 2014 Nicolas Ferre <nicolas.ferre@atmel.com> + * + * Licensed under GPLv2 or later. + */ +/dts-v1/; +#include "sama5d36.dtsi" + +/ { + model = "SAMA5D3 Xplained"; + compatible = "atmel,sama5d3-xplained", "atmel,sama5d3", "atmel,sama5"; + + chosen { + bootargs = "console=ttyS0,115200"; + }; + + memory { + reg = <0x20000000 0x10000000>; + }; + + ahb { + apb { + mmc0: mmc@f0000000 { + pinctrl-0 = <&pinctrl_mmc0_clk_cmd_dat0 &pinctrl_mmc0_dat1_3 &pinctrl_mmc0_dat4_7 &pinctrl_mmc0_cd>; + status = "okay"; + slot@0 { + reg = <0>; + bus-width = <8>; + cd-gpios = <&pioE 0 GPIO_ACTIVE_LOW>; + }; + }; + + spi0: spi@f0004000 { + cs-gpios = <&pioD 13 0>; + status = "okay"; + }; + + can0: can@f000c000 { + status = "okay"; + }; + + i2c0: i2c@f0014000 { + status = "okay"; + }; + + i2c1: i2c@f0018000 { + status = "okay"; + }; + + macb0: ethernet@f0028000 { + phy-mode = "rgmii"; + status = "okay"; + }; + + usart0: serial@f001c000 { + status = "okay"; + }; + + usart1: serial@f0020000 { + pinctrl-0 = <&pinctrl_usart1 &pinctrl_usart1_rts_cts>; + status = "okay"; + }; + + uart0: serial@f0024000 { + status = "okay"; + }; + + mmc1: mmc@f8000000 { + pinctrl-0 = <&pinctrl_mmc1_clk_cmd_dat0 &pinctrl_mmc1_dat1_3 &pinctrl_mmc1_cd>; + status = "okay"; + slot@0 { + reg = <0>; + bus-width = <4>; + cd-gpios = <&pioE 1 GPIO_ACTIVE_HIGH>; + }; + }; + + spi1: spi@f8008000 { + cs-gpios = <&pioC 25 0>, <0>, <0>, <&pioD 16 0>; + status = "okay"; + }; + + adc0: adc@f8018000 { + pinctrl-0 = < + &pinctrl_adc0_adtrg + &pinctrl_adc0_ad0 + &pinctrl_adc0_ad1 + &pinctrl_adc0_ad2 + &pinctrl_adc0_ad3 + &pinctrl_adc0_ad4 + &pinctrl_adc0_ad5 + &pinctrl_adc0_ad6 + &pinctrl_adc0_ad7 + &pinctrl_adc0_ad8 + &pinctrl_adc0_ad9 + >; + status = "okay"; + }; + + i2c2: i2c@f801c000 { + dmas = <0>, <0>; /* Do not use DMA for i2c2 */ + status = "okay"; + }; + + macb1: ethernet@f802c000 { + phy-mode = "rmii"; + status = "okay"; + }; + + dbgu: serial@ffffee00 { + status = "okay"; + }; + + pinctrl@fffff200 { + board { + pinctrl_mmc0_cd: mmc0_cd { + atmel,pins = + <AT91_PIOE 0 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>; + }; + + pinctrl_mmc1_cd: mmc1_cd { + atmel,pins = + <AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>; + }; + + pinctrl_usba_vbus: usba_vbus { + atmel,pins = + <AT91_PIOE 9 AT91_PERIPH_GPIO AT91_PINCTRL_DEGLITCH>; /* PE9, conflicts with A9 */ + }; + }; + }; + + pmc: pmc@fffffc00 { + main: mainck { + clock-frequency = <12000000>; + }; + }; + }; + + nand0: nand@60000000 { + nand-bus-width = <8>; + nand-ecc-mode = "hw"; + atmel,has-pmecc; + atmel,pmecc-cap = <4>; + atmel,pmecc-sector-size = <512>; + nand-on-flash-bbt; + status = "okay"; + + at91bootstrap@0 { + label = "at91bootstrap"; + reg = <0x0 0x40000>; + }; + + bootloader@40000 { + label = "bootloader"; + reg = <0x40000 0x80000>; + }; + + bootloaderenv@c0000 { + label = "bootloader env"; + reg = <0xc0000 0xc0000>; + }; + + 
dtb@180000 { + label = "device tree"; + reg = <0x180000 0x80000>; + }; + + kernel@200000 { + label = "kernel"; + reg = <0x200000 0x600000>; + }; + + rootfs@800000 { + label = "rootfs"; + reg = <0x800000 0x0f800000>; + }; + }; + + usb0: gadget@00500000 { + atmel,vbus-gpio = <&pioE 9 GPIO_ACTIVE_HIGH>; /* PE9, conflicts with A9 */ + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usba_vbus>; + status = "okay"; + }; + + usb1: ohci@00600000 { + num-ports = <3>; + atmel,vbus-gpio = <0 + &pioE 3 GPIO_ACTIVE_LOW + &pioE 4 GPIO_ACTIVE_LOW + >; + status = "okay"; + }; + + usb2: ehci@00700000 { + status = "okay"; + }; + }; + + gpio_keys { + compatible = "gpio-keys"; + + bp3 { + label = "PB_USER"; + gpios = <&pioE 29 GPIO_ACTIVE_LOW>; + linux,code = <0x104>; + gpio-key,wakeup; + }; + }; + + leds { + compatible = "gpio-leds"; + + d2 { + label = "d2"; + gpios = <&pioE 23 GPIO_ACTIVE_LOW>; /* PE23, conflicts with A23, CTS2 */ + linux,default-trigger = "heartbeat"; + }; + + d3 { + label = "d3"; + gpios = <&pioE 24 GPIO_ACTIVE_HIGH>; + }; + }; +}; diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi index 0042f73068b0..fece8665fb63 100644 --- a/arch/arm/boot/dts/at91sam9263.dtsi +++ b/arch/arm/boot/dts/at91sam9263.dtsi @@ -523,7 +523,7 @@ }; i2c0: i2c@fff88000 { - compatible = "atmel,at91sam9263-i2c"; + compatible = "atmel,at91sam9260-i2c"; reg = <0xfff88000 0x100>; interrupts = <13 IRQ_TYPE_LEVEL_HIGH 6>; #address-cells = <1>; diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts index e9487f6f0166..924a6a6ffd0f 100644 --- a/arch/arm/boot/dts/at91sam9n12ek.dts +++ b/arch/arm/boot/dts/at91sam9n12ek.dts @@ -124,6 +124,10 @@ nand-on-flash-bbt; status = "okay"; }; + + usb0: ohci@00500000 { + status = "okay"; + }; }; leds { diff --git a/arch/arm/boot/dts/imx6dl-hummingboard.dts b/arch/arm/boot/dts/imx6dl-hummingboard.dts index fd8fc7cd53f3..5bfae54fb780 100644 --- a/arch/arm/boot/dts/imx6dl-hummingboard.dts +++ b/arch/arm/boot/dts/imx6dl-hummingboard.dts @@ -52,12 +52,6 @@ }; }; - codec: spdif-transmitter { - compatible = "linux,spdif-dit"; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_hummingboard_spdif>; - }; - sound-spdif { compatible = "fsl,imx-audio-spdif"; model = "imx-spdif"; @@ -111,7 +105,7 @@ }; pinctrl_hummingboard_spdif: hummingboard-spdif { - fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x1b0b0>; + fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>; }; pinctrl_hummingboard_usbh1_vbus: hummingboard-usbh1-vbus { @@ -142,6 +136,8 @@ }; &spdif { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_hummingboard_spdif>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi index 64daa3b311f6..c2a24888a276 100644 --- a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi +++ b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi @@ -46,12 +46,6 @@ }; }; - codec: spdif-transmitter { - compatible = "linux,spdif-dit"; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_cubox_i_spdif>; - }; - sound-spdif { compatible = "fsl,imx-audio-spdif"; model = "imx-spdif"; @@ -89,7 +83,7 @@ }; pinctrl_cubox_i_spdif: cubox-i-spdif { - fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x1b0b0>; + fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>; }; pinctrl_cubox_i_usbh1_vbus: cubox-i-usbh1-vbus { @@ -121,6 +115,8 @@ }; &spdif { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_cubox_i_spdif>; status = "okay"; }; diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi index 52447c17537a..3d5faf85f51b 
100644 --- a/arch/arm/boot/dts/sama5d3.dtsi +++ b/arch/arm/boot/dts/sama5d3.dtsi @@ -1228,7 +1228,7 @@ compatible = "atmel,at91rm9200-ohci", "usb-ohci"; reg = <0x00600000 0x100000>; interrupts = <32 IRQ_TYPE_LEVEL_HIGH 2>; - clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>, + clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>; clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck"; status = "disabled"; diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi index 0c1e8d871ed1..6cb9b68e2188 100644 --- a/arch/arm/boot/dts/ste-href.dtsi +++ b/arch/arm/boot/dts/ste-href.dtsi @@ -188,7 +188,6 @@ msp2: msp@80117000 { pinctrl-names = "default"; pinctrl-0 = <&msp2_default_mode>; - status = "okay"; }; msp3: msp@80125000 { diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index 845bc745706b..ee6982976d66 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -29,6 +29,7 @@ CONFIG_ARCH_OMAP3=y CONFIG_ARCH_OMAP4=y CONFIG_SOC_OMAP5=y CONFIG_SOC_AM33XX=y +CONFIG_SOC_DRA7XX=y CONFIG_SOC_AM43XX=y CONFIG_ARCH_ROCKCHIP=y CONFIG_ARCH_SOCFPGA=y diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index e9a49fe0284e..8b8b61685a34 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -212,6 +212,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *, static inline void __flush_icache_all(void) { __flush_icache_preferred(); + dsb(); } /* diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index 03243f7eeddf..85c60adc8b60 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -120,13 +120,16 @@ /* * 2nd stage PTE definitions for LPAE. */ -#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */ -#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */ -#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */ -#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */ -#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */ +#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x0) << 2) /* strongly ordered */ +#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* normal inner write-through */ +#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* normal inner write-back */ +#define L_PTE_S2_MT_DEV_SHARED (_AT(pteval_t, 0x1) << 2) /* device */ +#define L_PTE_S2_MT_MASK (_AT(pteval_t, 0xf) << 2) -#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */ +#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */ +#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */ + +#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */ /* * Hyp-mode PL2 PTE definitions for LPAE. 
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index ef3c6072aa45..ac4bfae26702 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -37,18 +37,9 @@ static inline void dsb_sev(void) { -#if __LINUX_ARM_ARCH__ >= 7 - __asm__ __volatile__ ( - "dsb ishst\n" - SEV - ); -#else - __asm__ __volatile__ ( - "mcr p15, 0, %0, c7, c10, 4\n" - SEV - : : "r" (0) - ); -#endif + + dsb(ishst); + __asm__(SEV); } /* diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index b0df9761de6d..1e8b030dbefd 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -731,7 +731,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc) kernel_data.end = virt_to_phys(_end - 1); for_each_memblock(memory, region) { - res = memblock_virt_alloc_low(sizeof(*res), 0); + res = memblock_virt_alloc(sizeof(*res), 0); res->name = "System RAM"; res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region)); res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1; diff --git a/arch/arm/mach-hisi/Kconfig b/arch/arm/mach-hisi/Kconfig index 8f4649b301b2..1abae5f6a418 100644 --- a/arch/arm/mach-hisi/Kconfig +++ b/arch/arm/mach-hisi/Kconfig @@ -8,7 +8,7 @@ config ARCH_HI3xxx select CLKSRC_OF select GENERIC_CLOCKEVENTS select HAVE_ARM_SCU - select HAVE_ARM_TWD + select HAVE_ARM_TWD if SMP select HAVE_SMP select PINCTRL select PINCTRL_SINGLE diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c index af2e582d2b74..4d677f442539 100644 --- a/arch/arm/mach-imx/clk-imx6q.c +++ b/arch/arm/mach-imx/clk-imx6q.c @@ -482,6 +482,9 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) if (IS_ENABLED(CONFIG_PCI_IMX6)) clk_set_parent(clk[lvds1_sel], clk[sata_ref]); + /* Set initial power mode */ + imx6q_set_lpm(WAIT_CLOCKED); + np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt"); base = of_iomap(np, 0); WARN_ON(!base); diff --git a/arch/arm/mach-imx/clk-imx6sl.c b/arch/arm/mach-imx/clk-imx6sl.c index 3781a1853998..4c86f3035205 100644 --- a/arch/arm/mach-imx/clk-imx6sl.c +++ b/arch/arm/mach-imx/clk-imx6sl.c @@ -266,6 +266,9 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node) /* Audio-related clocks configuration */ clk_set_parent(clks[IMX6SL_CLK_SPDIF0_SEL], clks[IMX6SL_CLK_PLL3_PFD3]); + /* Set initial power mode */ + imx6q_set_lpm(WAIT_CLOCKED); + np = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-gpt"); base = of_iomap(np, 0); WARN_ON(!base); diff --git a/arch/arm/mach-imx/pm-imx6q.c b/arch/arm/mach-imx/pm-imx6q.c index 9d47adc078aa..7a9b98589db7 100644 --- a/arch/arm/mach-imx/pm-imx6q.c +++ b/arch/arm/mach-imx/pm-imx6q.c @@ -236,8 +236,6 @@ void __init imx6q_pm_init(void) regmap_update_bits(gpr, IOMUXC_GPR1, IMX6Q_GPR1_GINT, IMX6Q_GPR1_GINT); - /* Set initial power mode */ - imx6q_set_lpm(WAIT_CLOCKED); suspend_set_ops(&imx6q_pm_ops); } diff --git a/arch/arm/mach-moxart/Kconfig b/arch/arm/mach-moxart/Kconfig index ba470d64493b..3795ae28a613 100644 --- a/arch/arm/mach-moxart/Kconfig +++ b/arch/arm/mach-moxart/Kconfig @@ -2,7 +2,6 @@ config ARCH_MOXART bool "MOXA ART SoC" if ARCH_MULTI_V4T select CPU_FA526 select ARM_DMA_MEM_BUFFERABLE - select DMA_OF select USE_OF select CLKSRC_OF select CLKSRC_MMIO diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 653b489479e0..e2ce4f8366a7 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -54,7 +54,7 @@ config SOC_OMAP5 select ARM_GIC select CPU_V7 select HAVE_ARM_SCU 
if SMP - select HAVE_ARM_TWD if LOCAL_TIMERS + select HAVE_ARM_TWD if SMP select HAVE_SMP select HAVE_ARM_ARCH_TIMER select ARM_ERRATA_798181 if SMP diff --git a/arch/arm/mach-pxa/am300epd.c b/arch/arm/mach-pxa/am300epd.c index c9f309ae88c5..8b90c4f2d430 100644 --- a/arch/arm/mach-pxa/am300epd.c +++ b/arch/arm/mach-pxa/am300epd.c @@ -30,6 +30,7 @@ #include <mach/gumstix.h> #include <mach/mfp-pxa25x.h> +#include <mach/irqs.h> #include <linux/platform_data/video-pxafb.h> #include "generic.h" diff --git a/arch/arm/mach-pxa/include/mach/balloon3.h b/arch/arm/mach-pxa/include/mach/balloon3.h index 954641e6c8b1..1b0825911e62 100644 --- a/arch/arm/mach-pxa/include/mach/balloon3.h +++ b/arch/arm/mach-pxa/include/mach/balloon3.h @@ -14,6 +14,8 @@ #ifndef ASM_ARCH_BALLOON3_H #define ASM_ARCH_BALLOON3_H +#include "irqs.h" /* PXA_NR_BUILTIN_GPIO */ + enum balloon3_features { BALLOON3_FEATURE_OHCI, BALLOON3_FEATURE_MMC, diff --git a/arch/arm/mach-pxa/include/mach/corgi.h b/arch/arm/mach-pxa/include/mach/corgi.h index f3c3493b468d..c030d955bbd7 100644 --- a/arch/arm/mach-pxa/include/mach/corgi.h +++ b/arch/arm/mach-pxa/include/mach/corgi.h @@ -13,6 +13,7 @@ #ifndef __ASM_ARCH_CORGI_H #define __ASM_ARCH_CORGI_H 1 +#include "irqs.h" /* PXA_NR_BUILTIN_GPIO */ /* * Corgi (Non Standard) GPIO Definitions diff --git a/arch/arm/mach-pxa/include/mach/csb726.h b/arch/arm/mach-pxa/include/mach/csb726.h index 2628e7b72116..00cfbbbf73f7 100644 --- a/arch/arm/mach-pxa/include/mach/csb726.h +++ b/arch/arm/mach-pxa/include/mach/csb726.h @@ -11,6 +11,8 @@ #ifndef CSB726_H #define CSB726_H +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + #define CSB726_GPIO_IRQ_LAN 52 #define CSB726_GPIO_IRQ_SM501 53 #define CSB726_GPIO_MMC_DETECT 100 diff --git a/arch/arm/mach-pxa/include/mach/gumstix.h b/arch/arm/mach-pxa/include/mach/gumstix.h index dba14b6503ad..f7df27bbb42e 100644 --- a/arch/arm/mach-pxa/include/mach/gumstix.h +++ b/arch/arm/mach-pxa/include/mach/gumstix.h @@ -6,6 +6,7 @@ * published by the Free Software Foundation. */ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ /* BTRESET - Reset line to Bluetooth module, active low signal. */ #define GPIO_GUMSTIX_BTRESET 7 diff --git a/arch/arm/mach-pxa/include/mach/idp.h b/arch/arm/mach-pxa/include/mach/idp.h index 22a96f87232b..7e63f4680271 100644 --- a/arch/arm/mach-pxa/include/mach/idp.h +++ b/arch/arm/mach-pxa/include/mach/idp.h @@ -23,6 +23,7 @@ * IDP hardware. 
*/ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ #define IDP_FLASH_PHYS (PXA_CS0_PHYS) #define IDP_ALT_FLASH_PHYS (PXA_CS1_PHYS) diff --git a/arch/arm/mach-pxa/include/mach/palmld.h b/arch/arm/mach-pxa/include/mach/palmld.h index 2c4471336570..b184f296023b 100644 --- a/arch/arm/mach-pxa/include/mach/palmld.h +++ b/arch/arm/mach-pxa/include/mach/palmld.h @@ -13,6 +13,8 @@ #ifndef _INCLUDE_PALMLD_H_ #define _INCLUDE_PALMLD_H_ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + /** HERE ARE GPIOs **/ /* GPIOs */ diff --git a/arch/arm/mach-pxa/include/mach/palmt5.h b/arch/arm/mach-pxa/include/mach/palmt5.h index 0bd4f036c72f..e342c5921405 100644 --- a/arch/arm/mach-pxa/include/mach/palmt5.h +++ b/arch/arm/mach-pxa/include/mach/palmt5.h @@ -15,6 +15,8 @@ #ifndef _INCLUDE_PALMT5_H_ #define _INCLUDE_PALMT5_H_ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + /** HERE ARE GPIOs **/ /* GPIOs */ diff --git a/arch/arm/mach-pxa/include/mach/palmtc.h b/arch/arm/mach-pxa/include/mach/palmtc.h index c383a21680b6..81c727b3cfd2 100644 --- a/arch/arm/mach-pxa/include/mach/palmtc.h +++ b/arch/arm/mach-pxa/include/mach/palmtc.h @@ -16,6 +16,8 @@ #ifndef _INCLUDE_PALMTC_H_ #define _INCLUDE_PALMTC_H_ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + /** HERE ARE GPIOs **/ /* GPIOs */ diff --git a/arch/arm/mach-pxa/include/mach/palmtx.h b/arch/arm/mach-pxa/include/mach/palmtx.h index f2e530380253..92bc1f05300d 100644 --- a/arch/arm/mach-pxa/include/mach/palmtx.h +++ b/arch/arm/mach-pxa/include/mach/palmtx.h @@ -16,6 +16,8 @@ #ifndef _INCLUDE_PALMTX_H_ #define _INCLUDE_PALMTX_H_ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + /** HERE ARE GPIOs **/ /* GPIOs */ diff --git a/arch/arm/mach-pxa/include/mach/pcm027.h b/arch/arm/mach-pxa/include/mach/pcm027.h index 6bf28de228bd..86ebd7b6c960 100644 --- a/arch/arm/mach-pxa/include/mach/pcm027.h +++ b/arch/arm/mach-pxa/include/mach/pcm027.h @@ -23,6 +23,8 @@ * Definitions of CPU card resources only */ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + /* phyCORE-PXA270 (PCM027) Interrupts */ #define PCM027_IRQ(x) (IRQ_BOARD_START + (x)) #define PCM027_BTDET_IRQ PCM027_IRQ(0) diff --git a/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h b/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h index 0260aaa2fc17..7e544c14967e 100644 --- a/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h +++ b/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h @@ -20,6 +20,7 @@ */ #include <mach/pcm027.h> +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ /* * definitions relevant only when the PCM-990 diff --git a/arch/arm/mach-pxa/include/mach/poodle.h b/arch/arm/mach-pxa/include/mach/poodle.h index f32ff75dcca8..b56b19351a03 100644 --- a/arch/arm/mach-pxa/include/mach/poodle.h +++ b/arch/arm/mach-pxa/include/mach/poodle.h @@ -15,6 +15,8 @@ #ifndef __ASM_ARCH_POODLE_H #define __ASM_ARCH_POODLE_H 1 +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + /* * GPIOs */ diff --git a/arch/arm/mach-pxa/include/mach/spitz.h b/arch/arm/mach-pxa/include/mach/spitz.h index 0bfe6507c95d..25c9f62e46aa 100644 --- a/arch/arm/mach-pxa/include/mach/spitz.h +++ b/arch/arm/mach-pxa/include/mach/spitz.h @@ -15,8 +15,8 @@ #define __ASM_ARCH_SPITZ_H 1 #endif +#include "irqs.h" /* PXA_NR_BUILTIN_GPIO, PXA_GPIO_TO_IRQ */ #include <linux/fb.h> -#include <linux/gpio.h> /* Spitz/Akita GPIOs */ diff --git a/arch/arm/mach-pxa/include/mach/tosa.h b/arch/arm/mach-pxa/include/mach/tosa.h index 2bb0e862598c..0497d95cef25 100644 --- a/arch/arm/mach-pxa/include/mach/tosa.h +++ b/arch/arm/mach-pxa/include/mach/tosa.h @@ -13,6 +13,8 @@ #ifndef _ASM_ARCH_TOSA_H_ #define _ASM_ARCH_TOSA_H_ 1 
+#include "irqs.h" /* PXA_NR_BUILTIN_GPIO */ + /* TOSA Chip selects */ #define TOSA_LCDC_PHYS PXA_CS4_PHYS /* Internel Scoop */ diff --git a/arch/arm/mach-pxa/include/mach/trizeps4.h b/arch/arm/mach-pxa/include/mach/trizeps4.h index d2ca01053f69..ae3ca013afab 100644 --- a/arch/arm/mach-pxa/include/mach/trizeps4.h +++ b/arch/arm/mach-pxa/include/mach/trizeps4.h @@ -10,6 +10,8 @@ #ifndef _TRIPEPS4_H_ #define _TRIPEPS4_H_ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + /* physical memory regions */ #define TRIZEPS4_FLASH_PHYS (PXA_CS0_PHYS) /* Flash region */ #define TRIZEPS4_DISK_PHYS (PXA_CS1_PHYS) /* Disk On Chip region */ diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig index 338640631e08..05fa505df585 100644 --- a/arch/arm/mach-shmobile/Kconfig +++ b/arch/arm/mach-shmobile/Kconfig @@ -8,7 +8,7 @@ config ARCH_SHMOBILE_MULTI select CPU_V7 select GENERIC_CLOCKEVENTS select HAVE_ARM_SCU if SMP - select HAVE_ARM_TWD if LOCAL_TIMERS + select HAVE_ARM_TWD if SMP select HAVE_SMP select ARM_GIC select MIGHT_HAVE_CACHE_L2X0 diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c index 1db2a5ca9ab8..8c09a8393fb6 100644 --- a/arch/arm/mach-zynq/common.c +++ b/arch/arm/mach-zynq/common.c @@ -25,6 +25,7 @@ #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/of.h> +#include <linux/memblock.h> #include <linux/irqchip.h> #include <linux/irqchip/arm-gic.h> @@ -41,6 +42,18 @@ void __iomem *zynq_scu_base; +/** + * zynq_memory_init - Initialize special memory + * + * We need to stop things allocating the low memory as DMA can't work in + * the 1st 512K of memory. + */ +static void __init zynq_memory_init(void) +{ + if (!__pa(PAGE_OFFSET)) + memblock_reserve(__pa(PAGE_OFFSET), __pa(swapper_pg_dir)); +} + static struct platform_device zynq_cpuidle_device = { .name = "cpuidle-zynq", }; @@ -117,5 +130,6 @@ DT_MACHINE_START(XILINX_EP107, "Xilinx Zynq Platform") .init_machine = zynq_init_machine, .init_time = zynq_timer_init, .dt_compat = zynq_dt_match, + .reserve = zynq_memory_init, .restart = zynq_system_reset, MACHINE_END diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index d5a982d15a88..7ea641b7aa7d 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h @@ -38,6 +38,7 @@ static inline pmd_t *pmd_off_k(unsigned long virt) struct mem_type { pteval_t prot_pte; + pteval_t prot_pte_s2; pmdval_t prot_l1; pmdval_t prot_sect; unsigned int domain; diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 4f08c133cc25..a623cb3ad012 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -232,12 +232,16 @@ __setup("noalign", noalign_setup); #endif /* ifdef CONFIG_CPU_CP15 / else */ #define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN +#define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE static struct mem_type mem_types[] = { [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED | L_PTE_SHARED, + .prot_pte_s2 = s2_policy(PROT_PTE_S2_DEVICE) | + s2_policy(L_PTE_S2_MT_DEV_SHARED) | + L_PTE_SHARED, .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S, .domain = DOMAIN_IO, @@ -508,7 +512,8 @@ static void __init build_mem_type_table(void) cp = &cache_policies[cachepolicy]; vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; s2_pgprot = cp->pte_s2; - hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte; + hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte; + s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2; /* * 
ARMv6 and above have extended page tables. diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 45dc29f85d56..32b3558321c4 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S @@ -208,7 +208,6 @@ __v6_setup: mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c15, 0 @ clean+invalidate cache - mcr p15, 0, r0, c7, c10, 4 @ drain write buffer #ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs mcr p15, 0, r0, c2, c0, 2 @ TTB control register @@ -218,6 +217,8 @@ __v6_setup: ALT_UP(orr r8, r8, #TTB_FLAGS_UP) mcr p15, 0, r8, c2, c0, 1 @ load TTB1 #endif /* CONFIG_MMU */ + mcr p15, 0, r0, c7, c10, 4 @ drain write buffer and + @ complete invalidations adr r5, v6_crval ldmia r5, {r5, r6} ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index bd1781979a39..74f6033e76dd 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -351,7 +351,6 @@ __v7_setup: 4: mov r10, #0 mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate - dsb #ifdef CONFIG_MMU mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup @@ -360,6 +359,7 @@ __v7_setup: mcr p15, 0, r5, c10, c2, 0 @ write PRRR mcr p15, 0, r6, c10, c2, 1 @ write NMRR #endif + dsb @ Complete invalidations #ifndef CONFIG_ARM_THUMBEE mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE and r0, r0, #(0xf << 12) @ ThumbEE enabled field diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 495ab6f84a61..eaf54a30bedc 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -148,6 +148,15 @@ struct kvm_arch_memory_slot { #define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2) #define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2) +/* Device Control API: ARM VGIC */ +#define KVM_DEV_ARM_VGIC_GRP_ADDR 0 +#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 +#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2 +#define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32 +#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT) +#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0 +#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) + /* KVM_IRQ_LINE irq field index values */ #define KVM_ARM_IRQ_TYPE_SHIFT 24 #define KVM_ARM_IRQ_TYPE_MASK 0xff diff --git a/arch/avr32/Makefile b/arch/avr32/Makefile index 22fb66590dcd..dba48a5d5bb9 100644 --- a/arch/avr32/Makefile +++ b/arch/avr32/Makefile @@ -11,7 +11,7 @@ all: uImage vmlinux.elf KBUILD_DEFCONFIG := atstk1002_defconfig -KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic +KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic -D__linux__ KBUILD_AFLAGS += -mrelax -mno-pic KBUILD_CFLAGS_MODULE += -mno-relax LDFLAGS_vmlinux += --relax diff --git a/arch/avr32/boards/mimc200/fram.c b/arch/avr32/boards/mimc200/fram.c index 9764a1a1073e..c1466a872b9c 100644 --- a/arch/avr32/boards/mimc200/fram.c +++ b/arch/avr32/boards/mimc200/fram.c @@ -11,6 +11,7 @@ #define FRAM_VERSION "1.0" #include <linux/miscdevice.h> +#include <linux/module.h> #include <linux/proc_fs.h> #include <linux/mm.h> #include <linux/io.h> diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild index cfb9fe1b8df9..c7c64a63c29f 100644 --- a/arch/avr32/include/asm/Kbuild +++ b/arch/avr32/include/asm/Kbuild @@ -17,5 +17,6 @@ generic-y += scatterlist.h generic-y += sections.h generic-y += topology.h generic-y += trace_clock.h +generic-y += vga.h generic-y += xor.h 
generic-y += hash.h diff --git a/arch/avr32/include/asm/io.h b/arch/avr32/include/asm/io.h index fc6483f83ccc..4f5ec2bb7172 100644 --- a/arch/avr32/include/asm/io.h +++ b/arch/avr32/include/asm/io.h @@ -295,6 +295,8 @@ extern void __iounmap(void __iomem *addr); #define iounmap(addr) \ __iounmap(addr) +#define ioremap_wc ioremap_nocache + #define cached(addr) P1SEGADDR(addr) #define uncached(addr) P2SEGADDR(addr) diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index e27e9ad6818e..150866b2a3fe 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -134,6 +134,7 @@ static inline int dma_supported(struct device *dev, u64 mask) } extern int dma_set_mask(struct device *dev, u64 dma_mask); +extern int __dma_set_mask(struct device *dev, u64 dma_mask); #define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index 9e39ceb1d19f..d4dd41fb951b 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -172,10 +172,20 @@ struct eeh_ops { }; extern struct eeh_ops *eeh_ops; -extern int eeh_subsystem_enabled; +extern bool eeh_subsystem_enabled; extern raw_spinlock_t confirm_error_lock; extern int eeh_probe_mode; +static inline bool eeh_enabled(void) +{ + return eeh_subsystem_enabled; +} + +static inline void eeh_set_enable(bool mode) +{ + eeh_subsystem_enabled = mode; +} + #define EEH_PROBE_MODE_DEV (1<<0) /* From PCI device */ #define EEH_PROBE_MODE_DEVTREE (1<<1) /* From device tree */ @@ -246,7 +256,7 @@ void eeh_remove_device(struct pci_dev *); * If this macro yields TRUE, the caller relays to eeh_check_failure() * which does further tests out of line. */ -#define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_subsystem_enabled) +#define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_enabled()) /* * Reads from a device which has been isolated by EEH will return @@ -257,6 +267,13 @@ void eeh_remove_device(struct pci_dev *); #else /* !CONFIG_EEH */ +static inline bool eeh_enabled(void) +{ + return false; +} + +static inline void eeh_set_enable(bool mode) { } + static inline int eeh_init(void) { return 0; diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index d750336b171d..623f2971ce0e 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -127,7 +127,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { #ifdef CONFIG_PPC64 - return __pte(pte_update(mm, addr, ptep, ~0UL, 1)); + return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1)); #else return __pte(pte_update(ptep, ~0UL, 0)); #endif diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index f7a8036579b5..42632c7a2a4e 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -77,6 +77,7 @@ struct iommu_table { #ifdef CONFIG_IOMMU_API struct iommu_group *it_group; #endif + void (*set_bypass)(struct iommu_table *tbl, bool enable); }; /* Pure 2^n version of get_order */ diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h index bc141c950b1e..eb9261024f51 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64.h +++ b/arch/powerpc/include/asm/pgtable-ppc64.h @@ -195,6 +195,7 @@ extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr, static inline unsigned long pte_update(struct mm_struct *mm, unsigned long 
addr, pte_t *ptep, unsigned long clr, + unsigned long set, int huge) { #ifdef PTE_ATOMIC_UPDATES @@ -205,14 +206,15 @@ static inline unsigned long pte_update(struct mm_struct *mm, andi. %1,%0,%6\n\ bne- 1b \n\ andc %1,%0,%4 \n\ + or %1,%1,%7\n\ stdcx. %1,0,%3 \n\ bne- 1b" : "=&r" (old), "=&r" (tmp), "=m" (*ptep) - : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY) + : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set) : "cc" ); #else unsigned long old = pte_val(*ptep); - *ptep = __pte(old & ~clr); + *ptep = __pte((old & ~clr) | set); #endif /* huge pages use the old page table lock */ if (!huge) @@ -231,9 +233,9 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, { unsigned long old; - if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) + if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) return 0; - old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0); + old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0); return (old & _PAGE_ACCESSED) != 0; } #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG @@ -252,7 +254,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, if ((pte_val(*ptep) & _PAGE_RW) == 0) return; - pte_update(mm, addr, ptep, _PAGE_RW, 0); + pte_update(mm, addr, ptep, _PAGE_RW, 0, 0); } static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, @@ -261,7 +263,7 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, if ((pte_val(*ptep) & _PAGE_RW) == 0) return; - pte_update(mm, addr, ptep, _PAGE_RW, 1); + pte_update(mm, addr, ptep, _PAGE_RW, 0, 1); } /* @@ -284,14 +286,14 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0); + unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0); return __pte(old); } static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t * ptep) { - pte_update(mm, addr, ptep, ~0UL, 0); + pte_update(mm, addr, ptep, ~0UL, 0, 0); } @@ -506,7 +508,9 @@ extern int pmdp_set_access_flags(struct vm_area_struct *vma, extern unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, - pmd_t *pmdp, unsigned long clr); + pmd_t *pmdp, + unsigned long clr, + unsigned long set); static inline int __pmdp_test_and_clear_young(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp) @@ -515,7 +519,7 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm, if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) return 0; - old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED); + old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0); return ((old & _PAGE_ACCESSED) != 0); } @@ -542,7 +546,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr, if ((pmd_val(*pmdp) & _PAGE_RW) == 0) return; - pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW); + pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0); } #define __HAVE_ARCH_PMDP_SPLITTING_FLUSH diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index f83b6f3e1b39..3ebb188c3ff5 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -75,12 +75,34 @@ static inline pte_t pte_mknuma(pte_t pte) return pte; } +#define ptep_set_numa ptep_set_numa +static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + if ((pte_val(*ptep) & _PAGE_PRESENT) == 0) + VM_BUG_ON(1); + + pte_update(mm, 
addr, ptep, _PAGE_PRESENT, _PAGE_NUMA, 0); + return; +} + #define pmd_numa pmd_numa static inline int pmd_numa(pmd_t pmd) { return pte_numa(pmd_pte(pmd)); } +#define pmdp_set_numa pmdp_set_numa +static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp) +{ + if ((pmd_val(*pmdp) & _PAGE_PRESENT) == 0) + VM_BUG_ON(1); + + pmd_hugepage_update(mm, addr, pmdp, _PAGE_PRESENT, _PAGE_NUMA); + return; +} + #define pmd_mknonnuma pmd_mknonnuma static inline pmd_t pmd_mknonnuma(pmd_t pmd) { diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h index 4ee06fe15de4..d0e784e0ff48 100644 --- a/arch/powerpc/include/asm/sections.h +++ b/arch/powerpc/include/asm/sections.h @@ -8,6 +8,7 @@ #ifdef __powerpc64__ +extern char __start_interrupts[]; extern char __end_interrupts[]; extern char __prom_init_toc_start[]; @@ -21,6 +22,17 @@ static inline int in_kernel_text(unsigned long addr) return 0; } +static inline int overlaps_interrupt_vector_text(unsigned long start, + unsigned long end) +{ + unsigned long real_start, real_end; + real_start = __start_interrupts - _stext; + real_end = __end_interrupts - _stext; + + return start < (unsigned long)__va(real_end) && + (unsigned long)__va(real_start) < end; +} + static inline int overlaps_kernel_text(unsigned long start, unsigned long end) { return start < (unsigned long)__init_end && diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h index 0d9cecddf8a4..c53f5f6d1761 100644 --- a/arch/powerpc/include/asm/vdso.h +++ b/arch/powerpc/include/asm/vdso.h @@ -4,11 +4,11 @@ #ifdef __KERNEL__ /* Default link addresses for the vDSOs */ -#define VDSO32_LBASE 0x100000 -#define VDSO64_LBASE 0x100000 +#define VDSO32_LBASE 0x0 +#define VDSO64_LBASE 0x0 /* Default map addresses for 32bit vDSO */ -#define VDSO32_MBASE VDSO32_LBASE +#define VDSO32_MBASE 0x100000 #define VDSO_VERSION_STRING LINUX_2.6.15 diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index 8032b97ccdcb..ee78f6e49d64 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c @@ -191,12 +191,10 @@ EXPORT_SYMBOL(dma_direct_ops); #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) -int dma_set_mask(struct device *dev, u64 dma_mask) +int __dma_set_mask(struct device *dev, u64 dma_mask) { struct dma_map_ops *dma_ops = get_dma_ops(dev); - if (ppc_md.dma_set_mask) - return ppc_md.dma_set_mask(dev, dma_mask); if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL)) return dma_ops->set_dma_mask(dev, dma_mask); if (!dev->dma_mask || !dma_supported(dev, dma_mask)) @@ -204,6 +202,12 @@ int dma_set_mask(struct device *dev, u64 dma_mask) *dev->dma_mask = dma_mask; return 0; } +int dma_set_mask(struct device *dev, u64 dma_mask) +{ + if (ppc_md.dma_set_mask) + return ppc_md.dma_set_mask(dev, dma_mask); + return __dma_set_mask(dev, dma_mask); +} EXPORT_SYMBOL(dma_set_mask); u64 dma_get_required_mask(struct device *dev) diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 148db72a8c43..e7b76a6bf150 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -28,6 +28,7 @@ #include <linux/pci.h> #include <linux/proc_fs.h> #include <linux/rbtree.h> +#include <linux/reboot.h> #include <linux/seq_file.h> #include <linux/spinlock.h> #include <linux/export.h> @@ -89,7 +90,7 @@ /* Platform dependent EEH operations */ struct eeh_ops *eeh_ops = NULL; -int eeh_subsystem_enabled; +bool eeh_subsystem_enabled = false; EXPORT_SYMBOL(eeh_subsystem_enabled); /* @@ -364,7 +365,7 @@ int 
eeh_dev_check_failure(struct eeh_dev *edev) eeh_stats.total_mmio_ffs++; - if (!eeh_subsystem_enabled) + if (!eeh_enabled()) return 0; if (!edev) { @@ -747,6 +748,17 @@ int __exit eeh_ops_unregister(const char *name) return -EEXIST; } +static int eeh_reboot_notifier(struct notifier_block *nb, + unsigned long action, void *unused) +{ + eeh_set_enable(false); + return NOTIFY_DONE; +} + +static struct notifier_block eeh_reboot_nb = { + .notifier_call = eeh_reboot_notifier, +}; + /** * eeh_init - EEH initialization * @@ -778,6 +790,14 @@ int eeh_init(void) if (machine_is(powernv) && cnt++ <= 0) return ret; + /* Register reboot notifier */ + ret = register_reboot_notifier(&eeh_reboot_nb); + if (ret) { + pr_warn("%s: Failed to register notifier (%d)\n", + __func__, ret); + return ret; + } + /* call platform initialization function */ if (!eeh_ops) { pr_warning("%s: Platform EEH operation not found\n", @@ -822,7 +842,7 @@ int eeh_init(void) return ret; } - if (eeh_subsystem_enabled) + if (eeh_enabled()) pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n"); else pr_warning("EEH: No capable adapters found\n"); @@ -897,7 +917,7 @@ void eeh_add_device_late(struct pci_dev *dev) struct device_node *dn; struct eeh_dev *edev; - if (!dev || !eeh_subsystem_enabled) + if (!dev || !eeh_enabled()) return; pr_debug("EEH: Adding device %s\n", pci_name(dev)); @@ -1005,7 +1025,7 @@ void eeh_remove_device(struct pci_dev *dev) { struct eeh_dev *edev; - if (!dev || !eeh_subsystem_enabled) + if (!dev || !eeh_enabled()) return; edev = pci_dev_to_eeh_dev(dev); @@ -1045,7 +1065,7 @@ void eeh_remove_device(struct pci_dev *dev) static int proc_eeh_show(struct seq_file *m, void *v) { - if (0 == eeh_subsystem_enabled) { + if (!eeh_enabled()) { seq_printf(m, "EEH Subsystem is globally disabled\n"); seq_printf(m, "eeh_total_mmio_ffs=%llu\n", eeh_stats.total_mmio_ffs); } else { diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 7bb30dca4e19..fdc679d309ec 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -362,9 +362,13 @@ static void *eeh_rmv_device(void *data, void *userdata) */ if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE)) return NULL; + driver = eeh_pcid_get(dev); - if (driver && driver->err_handler) - return NULL; + if (driver) { + eeh_pcid_put(dev); + if (driver->err_handler) + return NULL; + } /* Remove it from PCI subsystem */ pr_debug("EEH: Removing %s without EEH sensitive driver\n", diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index d773dd440a45..88e3ec6e1d96 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -1088,6 +1088,14 @@ int iommu_take_ownership(struct iommu_table *tbl) memset(tbl->it_map, 0xff, sz); iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size); + /* + * Disable iommu bypass, otherwise the user can DMA to all of + * our physical memory via the bypass window instead of just + * the pages that has been explicitly mapped into the iommu + */ + if (tbl->set_bypass) + tbl->set_bypass(tbl, false); + return 0; } EXPORT_SYMBOL_GPL(iommu_take_ownership); @@ -1102,6 +1110,10 @@ void iommu_release_ownership(struct iommu_table *tbl) /* Restore bit#0 set by iommu_init_table() */ if (tbl->it_offset == 0) set_bit(0, tbl->it_map); + + /* The kernel owns the device now, we can restore the iommu bypass */ + if (tbl->set_bypass) + tbl->set_bypass(tbl, true); } EXPORT_SYMBOL_GPL(iommu_release_ownership); diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c 
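The set_bypass() hook added to struct iommu_table, and consumed in iommu_take_ownership()/iommu_release_ownership() above, lets the platform turn its direct-DMA bypass window off while VFIO userspace owns the table and back on when the kernel reclaims it. A minimal sketch of that contract with a hypothetical platform callback; the real PowerNV implementation, pnv_pci_ioda2_set_bypass(), appears further down in this merge:

    #include <linux/types.h>
    #include <asm/iommu.h>

    /* Hypothetical callback illustrating what the hook is expected to do. */
    static void my_phb_set_bypass(struct iommu_table *tbl, bool enable)
    {
            if (enable) {
                    /* reinstate the 64-bit bypass window for this PE */
            } else {
                    /* leave only explicitly mapped TCEs usable for DMA */
            }
    }

    static void my_phb_setup_table(struct iommu_table *tbl)
    {
            /* picked up by iommu_take_ownership()/iommu_release_ownership() */
            tbl->set_bypass = my_phb_set_bypass;
    }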
index 9729b23bfb0a..1d0848bba049 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -559,8 +559,13 @@ void exc_lvl_ctx_init(void) #ifdef CONFIG_PPC64 cpu_nr = i; #else +#ifdef CONFIG_SMP cpu_nr = get_hard_smp_processor_id(i); +#else + cpu_nr = 0; #endif +#endif + memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE); tp = critirq_ctx[cpu_nr]; tp->cpu = cpu_nr; diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index 75d4f7340da8..015ae55c1868 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c @@ -196,7 +196,9 @@ int overlaps_crashkernel(unsigned long start, unsigned long size) /* Values we need to export to the second kernel via the device tree. */ static phys_addr_t kernel_end; +static phys_addr_t crashk_base; static phys_addr_t crashk_size; +static unsigned long long mem_limit; static struct property kernel_end_prop = { .name = "linux,kernel-end", @@ -207,7 +209,7 @@ static struct property kernel_end_prop = { static struct property crashk_base_prop = { .name = "linux,crashkernel-base", .length = sizeof(phys_addr_t), - .value = &crashk_res.start, + .value = &crashk_base }; static struct property crashk_size_prop = { @@ -219,9 +221,11 @@ static struct property crashk_size_prop = { static struct property memory_limit_prop = { .name = "linux,memory-limit", .length = sizeof(unsigned long long), - .value = &memory_limit, + .value = &mem_limit, }; +#define cpu_to_be_ulong __PASTE(cpu_to_be, BITS_PER_LONG) + static void __init export_crashk_values(struct device_node *node) { struct property *prop; @@ -237,8 +241,9 @@ static void __init export_crashk_values(struct device_node *node) of_remove_property(node, prop); if (crashk_res.start != 0) { + crashk_base = cpu_to_be_ulong(crashk_res.start), of_add_property(node, &crashk_base_prop); - crashk_size = resource_size(&crashk_res); + crashk_size = cpu_to_be_ulong(resource_size(&crashk_res)); of_add_property(node, &crashk_size_prop); } @@ -246,6 +251,7 @@ static void __init export_crashk_values(struct device_node *node) * memory_limit is required by the kexec-tools to limit the * crash regions to the actual memory used. */ + mem_limit = cpu_to_be_ulong(memory_limit); of_update_property(node, &memory_limit_prop); } @@ -264,7 +270,7 @@ static int __init kexec_setup(void) of_remove_property(node, prop); /* information needed by userspace when using default_machine_kexec */ - kernel_end = __pa(_end); + kernel_end = cpu_to_be_ulong(__pa(_end)); of_add_property(node, &kernel_end_prop); export_crashk_values(node); diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index be4e6d648f60..59d229a2a3e0 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -369,6 +369,7 @@ void default_machine_kexec(struct kimage *image) /* Values we need to export to the second kernel via the device tree. 
*/ static unsigned long htab_base; +static unsigned long htab_size; static struct property htab_base_prop = { .name = "linux,htab-base", @@ -379,7 +380,7 @@ static struct property htab_base_prop = { static struct property htab_size_prop = { .name = "linux,htab-size", .length = sizeof(unsigned long), - .value = &htab_size_bytes, + .value = &htab_size, }; static int __init export_htab_values(void) @@ -403,8 +404,9 @@ static int __init export_htab_values(void) if (prop) of_remove_property(node, prop); - htab_base = __pa(htab_address); + htab_base = cpu_to_be64(__pa(htab_address)); of_add_property(node, &htab_base_prop); + htab_size = cpu_to_be64(htab_size_bytes); of_add_property(node, &htab_size_prop); of_node_put(node); diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 879f09620f83..7c6bb4b17b49 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -57,11 +57,14 @@ _GLOBAL(call_do_softirq) mtlr r0 blr +/* + * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp); + */ _GLOBAL(call_do_irq) mflr r0 stw r0,4(r1) lwz r10,THREAD+KSP_LIMIT(r2) - addi r11,r3,THREAD_INFO_GAP + addi r11,r4,THREAD_INFO_GAP stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4) mr r1,r4 stw r10,8(r1) diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S index b47a0e1ab001..1482327cfeba 100644 --- a/arch/powerpc/kernel/reloc_64.S +++ b/arch/powerpc/kernel/reloc_64.S @@ -69,8 +69,8 @@ _GLOBAL(relocate) * R_PPC64_RELATIVE ones. */ mtctr r8 -5: lwz r0,12(9) /* ELF64_R_TYPE(reloc->r_info) */ - cmpwi r0,R_PPC64_RELATIVE +5: ld r0,8(9) /* ELF64_R_TYPE(reloc->r_info) */ + cmpdi r0,R_PPC64_RELATIVE bne 6f ld r6,0(r9) /* reloc->r_offset */ ld r0,16(r9) /* reloc->r_addend */ diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 2b0da27eaee4..04cc4fcca78b 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -247,7 +247,12 @@ static void __init exc_lvl_early_init(void) /* interrupt stacks must be in lowmem, we get that for free on ppc32 * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */ for_each_possible_cpu(i) { +#ifdef CONFIG_SMP hw_cpu = get_hard_smp_processor_id(i); +#else + hw_cpu = 0; +#endif + critirq_ctx[hw_cpu] = (struct thread_info *) __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); #ifdef CONFIG_BOOKE diff --git a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S index 79683d0393f5..6ac107ac402a 100644 --- a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S +++ b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S @@ -6,7 +6,7 @@ .globl vdso32_start, vdso32_end .balign PAGE_SIZE vdso32_start: - .incbin "arch/powerpc/kernel/vdso32/vdso32.so" + .incbin "arch/powerpc/kernel/vdso32/vdso32.so.dbg" .balign PAGE_SIZE vdso32_end: diff --git a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S index 8df9e2463007..df60fca6a13d 100644 --- a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S +++ b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S @@ -6,7 +6,7 @@ .globl vdso64_start, vdso64_end .balign PAGE_SIZE vdso64_start: - .incbin "arch/powerpc/kernel/vdso64/vdso64.so" + .incbin "arch/powerpc/kernel/vdso64/vdso64.so.dbg" .balign PAGE_SIZE vdso64_end: diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index de6881259aef..d766d6ee33fe 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -207,6 +207,20 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long 
vend, if (overlaps_kernel_text(vaddr, vaddr + step)) tprot &= ~HPTE_R_N; + /* + * If relocatable, check if it overlaps interrupt vectors that + * are copied down to real 0. For relocatable kernel + * (e.g. kdump case) we copy interrupt vectors down to real + * address 0. Mark that region as executable. This is + * because on p8 system with relocation on exception feature + * enabled, exceptions are raised with MMU (IR=DR=1) ON. Hence + * in order to execute the interrupt handlers in virtual + * mode the vector region need to be marked as executable. + */ + if ((PHYSICAL_START > MEMORY_START) && + overlaps_interrupt_vector_text(vaddr, vaddr + step)) + tprot &= ~HPTE_R_N; + hash = hpt_hash(vpn, shift, ssize); hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index 65b7b65e8708..62bf5e8e78da 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -510,7 +510,8 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, } unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, - pmd_t *pmdp, unsigned long clr) + pmd_t *pmdp, unsigned long clr, + unsigned long set) { unsigned long old, tmp; @@ -526,14 +527,15 @@ unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, andi. %1,%0,%6\n\ bne- 1b \n\ andc %1,%0,%4 \n\ + or %1,%1,%7\n\ stdcx. %1,0,%3 \n\ bne- 1b" : "=&r" (old), "=&r" (tmp), "=m" (*pmdp) - : "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY) + : "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY), "r" (set) : "cc" ); #else old = pmd_val(*pmdp); - *pmdp = __pmd(old & ~clr); + *pmdp = __pmd((old & ~clr) | set); #endif if (old & _PAGE_HASHPTE) hpte_do_hugepage_flush(mm, addr, pmdp); @@ -708,7 +710,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { - pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT); + pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0); } /* @@ -835,7 +837,7 @@ pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long old; pgtable_t *pgtable_slot; - old = pmd_hugepage_update(mm, addr, pmdp, ~0UL); + old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0); old_pmd = __pmd(old); /* * We have pmd == none and we are holding page_table_lock. 
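Both pte_update() and pmd_hugepage_update() now take a separate set mask and apply it together with the clear mask in a single atomic update, which is what ptep_set_numa()/pmdp_set_numa() above rely on to flip _PAGE_PRESENT off and _PAGE_NUMA on without a window in which the entry looks simply cleared. A condensed sketch of the invariant, restating the non-atomic #else branch (the ppc64 build uses the ldarx/stdcx. loop shown in the hunks above, and the sketch assumes the surrounding pgtable definitions):

    /* Sketch only: what pte_update(mm, addr, ptep, clr, set, huge) guarantees. */
    static inline unsigned long pte_update_sketch(pte_t *ptep,
                                                  unsigned long clr,
                                                  unsigned long set)
    {
            unsigned long old = pte_val(*ptep);

            *ptep = __pte((old & ~clr) | set);      /* clear and set in one store */
            return old;
    }

For example, ptep_set_numa() passes clr = _PAGE_PRESENT and set = _PAGE_NUMA, while plain clears keep passing set = 0, which is why every pre-existing caller gained an extra 0 argument in this series.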
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c index a770df2dae70..6c0b1f5f8d2c 100644 --- a/arch/powerpc/mm/subpage-prot.c +++ b/arch/powerpc/mm/subpage-prot.c @@ -78,7 +78,7 @@ static void hpte_flush_range(struct mm_struct *mm, unsigned long addr, pte = pte_offset_map_lock(mm, pmd, addr, &ptl); arch_enter_lazy_mmu_mode(); for (; npages > 0; --npages) { - pte_update(mm, addr, pte, 0, 0); + pte_update(mm, addr, pte, 0, 0, 0); addr += PAGE_SIZE; ++pte; } diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 29b89e863d7c..67cf22083f4c 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -1147,6 +1147,9 @@ static void power_pmu_enable(struct pmu *pmu) mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]); mb(); + if (cpuhw->bhrb_users) + ppmu->config_bhrb(cpuhw->bhrb_filter); + write_mmcr0(cpuhw, mmcr0); /* @@ -1158,8 +1161,6 @@ static void power_pmu_enable(struct pmu *pmu) } out: - if (cpuhw->bhrb_users) - ppmu->config_bhrb(cpuhw->bhrb_filter); local_irq_restore(flags); } diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c index a3f7abd2f13f..96cee20dcd34 100644 --- a/arch/powerpc/perf/power8-pmu.c +++ b/arch/powerpc/perf/power8-pmu.c @@ -25,6 +25,37 @@ #define PM_BRU_FIN 0x10068 #define PM_BR_MPRED_CMPL 0x400f6 +/* All L1 D cache load references counted at finish, gated by reject */ +#define PM_LD_REF_L1 0x100ee +/* Load Missed L1 */ +#define PM_LD_MISS_L1 0x3e054 +/* Store Missed L1 */ +#define PM_ST_MISS_L1 0x300f0 +/* L1 cache data prefetches */ +#define PM_L1_PREF 0x0d8b8 +/* Instruction fetches from L1 */ +#define PM_INST_FROM_L1 0x04080 +/* Demand iCache Miss */ +#define PM_L1_ICACHE_MISS 0x200fd +/* Instruction Demand sectors wriittent into IL1 */ +#define PM_L1_DEMAND_WRITE 0x0408c +/* Instruction prefetch written into IL1 */ +#define PM_IC_PREF_WRITE 0x0408e +/* The data cache was reloaded from local core's L3 due to a demand load */ +#define PM_DATA_FROM_L3 0x4c042 +/* Demand LD - L3 Miss (not L2 hit and not L3 hit) */ +#define PM_DATA_FROM_L3MISS 0x300fe +/* All successful D-side store dispatches for this thread */ +#define PM_L2_ST 0x17080 +/* All successful D-side store dispatches for this thread that were L2 Miss */ +#define PM_L2_ST_MISS 0x17082 +/* Total HW L3 prefetches(Load+store) */ +#define PM_L3_PREF_ALL 0x4e052 +/* Data PTEG reload */ +#define PM_DTLB_MISS 0x300fc +/* ITLB Reloaded */ +#define PM_ITLB_MISS 0x400fc + /* * Raw event encoding for POWER8: @@ -557,6 +588,8 @@ static int power8_generic_events[] = { [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_FIN, [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL, + [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1, + [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1, }; static u64 power8_bhrb_filter_map(u64 branch_sample_type) @@ -596,6 +629,116 @@ static void power8_config_bhrb(u64 pmu_bhrb_filter) mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter)); } +#define C(x) PERF_COUNT_HW_CACHE_##x + +/* + * Table of generalized cache-related events. + * 0 means not supported, -1 means nonsensical, other values + * are event codes. 
+ */ +static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { + [ C(L1D) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = PM_LD_REF_L1, + [ C(RESULT_MISS) ] = PM_LD_MISS_L1, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = PM_ST_MISS_L1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = PM_L1_PREF, + [ C(RESULT_MISS) ] = 0, + }, + }, + [ C(L1I) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = PM_INST_FROM_L1, + [ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE, + [ C(RESULT_MISS) ] = 0, + }, + }, + [ C(LL) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3, + [ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = PM_L2_ST, + [ C(RESULT_MISS) ] = PM_L2_ST_MISS, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL, + [ C(RESULT_MISS) ] = 0, + }, + }, + [ C(DTLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = PM_DTLB_MISS, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + [ C(ITLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = PM_ITLB_MISS, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + [ C(BPU) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = PM_BRU_FIN, + [ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + [ C(NODE) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, +}; + +#undef C + static struct power_pmu power8_pmu = { .name = "POWER8", .n_counter = 6, @@ -611,6 +754,7 @@ static struct power_pmu power8_pmu = { .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB, .n_generic = ARRAY_SIZE(power8_generic_events), .generic_events = power8_generic_events, + .cache_events = &power8_cache_events, .attr_groups = power8_pmu_attr_groups, .bhrb_nr = 32, }; diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c index e1e71618b70c..f51474336460 100644 --- a/arch/powerpc/platforms/powernv/eeh-ioda.c +++ b/arch/powerpc/platforms/powernv/eeh-ioda.c @@ -44,7 +44,8 @@ static int ioda_eeh_event(struct notifier_block *nb, /* We simply send special EEH event */ if ((changed_evts & OPAL_EVENT_PCI_ERROR) && - (events & OPAL_EVENT_PCI_ERROR)) + (events & OPAL_EVENT_PCI_ERROR) && + eeh_enabled()) eeh_send_failure_event(NULL); return 0; @@ -489,8 +490,7 @@ static int ioda_eeh_bridge_reset(struct pci_controller *hose, static int ioda_eeh_reset(struct eeh_pe *pe, int option) { struct pci_controller *hose = pe->phb; - struct eeh_dev *edev; - struct pci_dev *dev; + struct pci_bus *bus; int ret; /* @@ -519,31 +519,11 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option) if (pe->type & EEH_PE_PHB) { ret = ioda_eeh_phb_reset(hose, option); } else { - if (pe->type & EEH_PE_DEVICE) { - 
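The power8_cache_events table above is indexed by the core powerpc PMU code as table[cache type][operation][result]; a value of 0 means the combination is not counted on POWER8 and -1 marks combinations that make no sense. A paraphrased sketch of that lookup (the real resolution lives in core-book3s.c and may differ in detail):

    #include <linux/errno.h>
    #include <linux/perf_event.h>

    /* Assumes the power8_cache_events table defined above is in scope. */
    static int power8_resolve_cache_event(unsigned int type, unsigned int op,
                                          unsigned int result, u64 *evp)
    {
            int code;

            if (type >= PERF_COUNT_HW_CACHE_MAX ||
                op >= PERF_COUNT_HW_CACHE_OP_MAX ||
                result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                    return -EINVAL;

            code = power8_cache_events[type][op][result];
            if (code == 0)
                    return -EOPNOTSUPP;     /* not counted on this PMU */
            if (code == -1)
                    return -EINVAL;         /* nonsensical combination */

            *evp = code;                    /* e.g. PM_LD_MISS_L1 for L1D read misses */
            return 0;
    }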
/* - * If it's device PE, we didn't refer to the parent - * PCI bus yet. So we have to figure it out indirectly. - */ - edev = list_first_entry(&pe->edevs, - struct eeh_dev, list); - dev = eeh_dev_to_pci_dev(edev); - dev = dev->bus->self; - } else { - /* - * If it's bus PE, the parent PCI bus is already there - * and just pick it up. - */ - dev = pe->bus->self; - } - - /* - * Do reset based on the fact that the direct upstream bridge - * is root bridge (port) or not. - */ - if (dev->bus->number == 0) + bus = eeh_pe_bus_get(pe); + if (pci_is_root_bus(bus)) ret = ioda_eeh_root_reset(hose, option); else - ret = ioda_eeh_bridge_reset(hose, dev, option); + ret = ioda_eeh_bridge_reset(hose, bus->self, option); } return ret; diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index a79fddc5e74e..a59788e83b8b 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -145,7 +145,7 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag) * Enable EEH explicitly so that we will do EEH check * while accessing I/O stuff */ - eeh_subsystem_enabled = 1; + eeh_set_enable(true); /* Save memory bars */ eeh_save_bars(edev); diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 7d6dcc6d5fa9..3b2b4fb3585b 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -21,6 +21,7 @@ #include <linux/irq.h> #include <linux/io.h> #include <linux/msi.h> +#include <linux/memblock.h> #include <asm/sections.h> #include <asm/io.h> @@ -460,9 +461,39 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev return; pe = &phb->ioda.pe_array[pdn->pe_number]; + WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops); set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table); } +static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb, + struct pci_dev *pdev, u64 dma_mask) +{ + struct pci_dn *pdn = pci_get_pdn(pdev); + struct pnv_ioda_pe *pe; + uint64_t top; + bool bypass = false; + + if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE)) + return -ENODEV;; + + pe = &phb->ioda.pe_array[pdn->pe_number]; + if (pe->tce_bypass_enabled) { + top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1; + bypass = (dma_mask >= top); + } + + if (bypass) { + dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n"); + set_dma_ops(&pdev->dev, &dma_direct_ops); + set_dma_offset(&pdev->dev, pe->tce_bypass_base); + } else { + dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n"); + set_dma_ops(&pdev->dev, &dma_iommu_ops); + set_iommu_table_base(&pdev->dev, &pe->tce32_table); + } + return 0; +} + static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus) { struct pci_dev *dev; @@ -657,6 +688,56 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb, __free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs)); } +static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable) +{ + struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe, + tce32_table); + uint16_t window_id = (pe->pe_number << 1 ) + 1; + int64_t rc; + + pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? 
"En" : "Dis"); + if (enable) { + phys_addr_t top = memblock_end_of_DRAM(); + + top = roundup_pow_of_two(top); + rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, + pe->pe_number, + window_id, + pe->tce_bypass_base, + top); + } else { + rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, + pe->pe_number, + window_id, + pe->tce_bypass_base, + 0); + + /* + * We might want to reset the DMA ops of all devices on + * this PE. However in theory, that shouldn't be necessary + * as this is used for VFIO/KVM pass-through and the device + * hasn't yet been returned to its kernel driver + */ + } + if (rc) + pe_err(pe, "OPAL error %lld configuring bypass window\n", rc); + else + pe->tce_bypass_enabled = enable; +} + +static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb, + struct pnv_ioda_pe *pe) +{ + /* TVE #1 is selected by PCI address bit 59 */ + pe->tce_bypass_base = 1ull << 59; + + /* Install set_bypass callback for VFIO */ + pe->tce32_table.set_bypass = pnv_pci_ioda2_set_bypass; + + /* Enable bypass by default */ + pnv_pci_ioda2_set_bypass(&pe->tce32_table, true); +} + static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) { @@ -727,6 +808,8 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, else pnv_ioda_setup_bus_dma(pe, pe->pbus); + /* Also create a bypass window */ + pnv_pci_ioda2_setup_bypass_pe(phb, pe); return; fail: if (pe->tce32_seg >= 0) @@ -1286,6 +1369,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, /* Setup TCEs */ phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup; + phb->dma_set_mask = pnv_pci_ioda_dma_set_mask; /* Setup shutdown function for kexec */ phb->shutdown = pnv_pci_ioda_shutdown; diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index b555ebc57ef5..95633d79ef5d 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -634,6 +634,16 @@ static void pnv_pci_dma_dev_setup(struct pci_dev *pdev) pnv_pci_dma_fallback_setup(hose, pdev); } +int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask) +{ + struct pci_controller *hose = pci_bus_to_host(pdev->bus); + struct pnv_phb *phb = hose->private_data; + + if (phb && phb->dma_set_mask) + return phb->dma_set_mask(phb, pdev, dma_mask); + return __dma_set_mask(&pdev->dev, dma_mask); +} + void pnv_pci_shutdown(void) { struct pci_controller *hose; diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 13f1942a9a5f..cde169442775 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -54,7 +54,9 @@ struct pnv_ioda_pe { struct iommu_table tce32_table; phys_addr_t tce_inval_reg_phys; - /* XXX TODO: Add support for additional 64-bit iommus */ + /* 64-bit TCE bypass region */ + bool tce_bypass_enabled; + uint64_t tce_bypass_base; /* MSIs. MVE index is identical for for 32 and 64 bit MSI * and -1 if not supported. 
(It's actually identical to the @@ -113,6 +115,8 @@ struct pnv_phb { unsigned int hwirq, unsigned int virq, unsigned int is_64, struct msi_msg *msg); void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev); + int (*dma_set_mask)(struct pnv_phb *phb, struct pci_dev *pdev, + u64 dma_mask); void (*fixup_phb)(struct pci_controller *hose); u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn); void (*shutdown)(struct pnv_phb *phb); diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h index de6819be1f95..0051e108ef0f 100644 --- a/arch/powerpc/platforms/powernv/powernv.h +++ b/arch/powerpc/platforms/powernv/powernv.h @@ -7,12 +7,20 @@ extern void pnv_smp_init(void); static inline void pnv_smp_init(void) { } #endif +struct pci_dev; + #ifdef CONFIG_PCI extern void pnv_pci_init(void); extern void pnv_pci_shutdown(void); +extern int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask); #else static inline void pnv_pci_init(void) { } static inline void pnv_pci_shutdown(void) { } + +static inline int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask) +{ + return -ENODEV; +} #endif extern void pnv_lpc_init(void); diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index 21166f65c97c..110f4fbd319f 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -27,6 +27,7 @@ #include <linux/interrupt.h> #include <linux/bug.h> #include <linux/cpuidle.h> +#include <linux/pci.h> #include <asm/machdep.h> #include <asm/firmware.h> @@ -141,6 +142,13 @@ static void pnv_progress(char *s, unsigned short hex) { } +static int pnv_dma_set_mask(struct device *dev, u64 dma_mask) +{ + if (dev_is_pci(dev)) + return pnv_pci_dma_set_mask(to_pci_dev(dev), dma_mask); + return __dma_set_mask(dev, dma_mask); +} + static void pnv_shutdown(void) { /* Let the PCI code clear up IODA tables */ @@ -238,6 +246,7 @@ define_machine(powernv) { .machine_shutdown = pnv_shutdown, .power_save = powernv_idle, .calibrate_decr = generic_calibrate_decr, + .dma_set_mask = pnv_dma_set_mask, #ifdef CONFIG_KEXEC .kexec_cpu_down = pnv_kexec_cpu_down, #endif diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index 37300f6ee244..80b1d57c306a 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig @@ -20,6 +20,7 @@ config PPC_PSERIES select PPC_DOORBELL select HAVE_CONTEXT_TRACKING select HOTPLUG_CPU if SMP + select ARCH_RANDOM default y config PPC_SPLPAR diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index 9ef3cc8ebc11..8a8f0472d98f 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -265,7 +265,7 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag) enable = 1; if (enable) { - eeh_subsystem_enabled = 1; + eeh_set_enable(true); eeh_add_to_parent_pe(edev); pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n", diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c index 70670a2d9cf2..c413ec158ff5 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c @@ -113,7 +113,8 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) { struct device_node *dn, *pdn; struct pci_bus *bus; - const __be32 *pcie_link_speed_stats; + u32 pcie_link_speed_stats[2]; + int rc; bus = bridge->bus; @@ -122,38 +123,45 
@@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) return 0; for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) { - pcie_link_speed_stats = of_get_property(pdn, - "ibm,pcie-link-speed-stats", NULL); - if (pcie_link_speed_stats) + rc = of_property_read_u32_array(pdn, + "ibm,pcie-link-speed-stats", + &pcie_link_speed_stats[0], 2); + if (!rc) break; } of_node_put(pdn); - if (!pcie_link_speed_stats) { + if (rc) { pr_err("no ibm,pcie-link-speed-stats property\n"); return 0; } - switch (be32_to_cpup(pcie_link_speed_stats)) { + switch (pcie_link_speed_stats[0]) { case 0x01: bus->max_bus_speed = PCIE_SPEED_2_5GT; break; case 0x02: bus->max_bus_speed = PCIE_SPEED_5_0GT; break; + case 0x04: + bus->max_bus_speed = PCIE_SPEED_8_0GT; + break; default: bus->max_bus_speed = PCI_SPEED_UNKNOWN; break; } - switch (be32_to_cpup(pcie_link_speed_stats)) { + switch (pcie_link_speed_stats[1]) { case 0x01: bus->cur_bus_speed = PCIE_SPEED_2_5GT; break; case 0x02: bus->cur_bus_speed = PCIE_SPEED_5_0GT; break; + case 0x04: + bus->cur_bus_speed = PCIE_SPEED_8_0GT; + break; default: bus->cur_bus_speed = PCI_SPEED_UNKNOWN; break; diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 8e639d7cbda7..972df0ffd4dc 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -430,8 +430,7 @@ static void pSeries_machine_kexec(struct kimage *image) { long rc; - if (firmware_has_feature(FW_FEATURE_SET_MODE) && - (image->type != KEXEC_TYPE_CRASH)) { + if (firmware_has_feature(FW_FEATURE_SET_MODE)) { rc = pSeries_disable_reloc_on_exc(); if (rc != H_SUCCESS) pr_warning("Warning: Failed to disable relocation on " diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 0e166ed4cd16..8209744b2829 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -886,25 +886,25 @@ int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type) /* Default: read HW settings */ if (flow_type == IRQ_TYPE_DEFAULT) { - switch(vold & (MPIC_INFO(VECPRI_POLARITY_MASK) | - MPIC_INFO(VECPRI_SENSE_MASK))) { - case MPIC_INFO(VECPRI_SENSE_EDGE) | - MPIC_INFO(VECPRI_POLARITY_POSITIVE): - flow_type = IRQ_TYPE_EDGE_RISING; - break; - case MPIC_INFO(VECPRI_SENSE_EDGE) | - MPIC_INFO(VECPRI_POLARITY_NEGATIVE): - flow_type = IRQ_TYPE_EDGE_FALLING; - break; - case MPIC_INFO(VECPRI_SENSE_LEVEL) | - MPIC_INFO(VECPRI_POLARITY_POSITIVE): - flow_type = IRQ_TYPE_LEVEL_HIGH; - break; - case MPIC_INFO(VECPRI_SENSE_LEVEL) | - MPIC_INFO(VECPRI_POLARITY_NEGATIVE): - flow_type = IRQ_TYPE_LEVEL_LOW; - break; - } + int vold_ps; + + vold_ps = vold & (MPIC_INFO(VECPRI_POLARITY_MASK) | + MPIC_INFO(VECPRI_SENSE_MASK)); + + if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) | + MPIC_INFO(VECPRI_POLARITY_POSITIVE))) + flow_type = IRQ_TYPE_EDGE_RISING; + else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) | + MPIC_INFO(VECPRI_POLARITY_NEGATIVE))) + flow_type = IRQ_TYPE_EDGE_FALLING; + else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) | + MPIC_INFO(VECPRI_POLARITY_POSITIVE))) + flow_type = IRQ_TYPE_LEVEL_HIGH; + else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) | + MPIC_INFO(VECPRI_POLARITY_NEGATIVE))) + flow_type = IRQ_TYPE_LEVEL_LOW; + else + WARN_ONCE(1, "mpic: unknown IRQ type %d\n", vold); } /* Apply to irq desc */ diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index a90731b3d44a..b07909850f77 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -309,16 +309,23 @@ static void get_output_lock(void) if 
(xmon_speaker == me) return; + for (;;) { - if (xmon_speaker == 0) { - last_speaker = cmpxchg(&xmon_speaker, 0, me); - if (last_speaker == 0) - return; - } - timeout = 10000000; + last_speaker = cmpxchg(&xmon_speaker, 0, me); + if (last_speaker == 0) + return; + + /* + * Wait a full second for the lock, we might be on a slow + * console, but check every 100us. + */ + timeout = 10000; while (xmon_speaker == last_speaker) { - if (--timeout > 0) + if (--timeout > 0) { + udelay(100); continue; + } + /* hostile takeover */ prev = cmpxchg(&xmon_speaker, last_speaker, me); if (prev == last_speaker) @@ -397,7 +404,6 @@ static int xmon_core(struct pt_regs *regs, int fromipi) } xmon_fault_jmp[cpu] = recurse_jmp; - cpumask_set_cpu(cpu, &cpus_in_xmon); bp = NULL; if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) @@ -419,6 +425,8 @@ static int xmon_core(struct pt_regs *regs, int fromipi) release_output_lock(); } + cpumask_set_cpu(cpu, &cpus_in_xmon); + waiting: secondary = 1; while (secondary && !xmon_gate) { diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 3b978c472d08..3d6b9f81cc68 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -132,6 +132,8 @@ extern void __init efi_map_region_fixed(efi_memory_desc_t *md); extern void efi_sync_low_kernel_mappings(void); extern void efi_setup_page_tables(void); extern void __init old_map_region(efi_memory_desc_t *md); +extern void __init runtime_code_page_mkexec(void); +extern void __init efi_runtime_mkexec(void); struct efi_setup_data { u64 fw_vendor; diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 24b6fd10625a..8e28bf2fc3ef 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -284,8 +284,13 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c) raw_local_save_flags(eflags); BUG_ON(eflags & X86_EFLAGS_AC); - if (cpu_has(c, X86_FEATURE_SMAP)) + if (cpu_has(c, X86_FEATURE_SMAP)) { +#ifdef CONFIG_X86_SMAP set_in_cr4(X86_CR4_SMAP); +#else + clear_in_cr4(X86_CR4_SMAP); +#endif + } } /* diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index d4bdd253fea7..e6253195a301 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -77,8 +77,7 @@ within(unsigned long addr, unsigned long start, unsigned long end) return addr >= start && addr < end; } -static int -do_ftrace_mod_code(unsigned long ip, const void *new_code) +static unsigned long text_ip_addr(unsigned long ip) { /* * On x86_64, kernel text mappings are mapped read-only with @@ -91,7 +90,7 @@ do_ftrace_mod_code(unsigned long ip, const void *new_code) if (within(ip, (unsigned long)_text, (unsigned long)_etext)) ip = (unsigned long)__va(__pa_symbol(ip)); - return probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE); + return ip; } static const unsigned char *ftrace_nop_replace(void) @@ -123,8 +122,10 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code, if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0) return -EINVAL; + ip = text_ip_addr(ip); + /* replace the text with the new text */ - if (do_ftrace_mod_code(ip, new_code)) + if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE)) return -EPERM; sync_core(); @@ -221,37 +222,51 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, return -EINVAL; } -int ftrace_update_ftrace_func(ftrace_func_t func) +static unsigned long ftrace_update_func; + +static int update_ftrace_func(unsigned long ip, void *new) { - unsigned long ip = (unsigned 
long)(&ftrace_call); - unsigned char old[MCOUNT_INSN_SIZE], *new; + unsigned char old[MCOUNT_INSN_SIZE]; int ret; - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE); - new = ftrace_call_replace(ip, (unsigned long)func); + memcpy(old, (void *)ip, MCOUNT_INSN_SIZE); + + ftrace_update_func = ip; + /* Make sure the breakpoints see the ftrace_update_func update */ + smp_wmb(); /* See comment above by declaration of modifying_ftrace_code */ atomic_inc(&modifying_ftrace_code); ret = ftrace_modify_code(ip, old, new); + atomic_dec(&modifying_ftrace_code); + + return ret; +} + +int ftrace_update_ftrace_func(ftrace_func_t func) +{ + unsigned long ip = (unsigned long)(&ftrace_call); + unsigned char *new; + int ret; + + new = ftrace_call_replace(ip, (unsigned long)func); + ret = update_ftrace_func(ip, new); + /* Also update the regs callback function */ if (!ret) { ip = (unsigned long)(&ftrace_regs_call); - memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE); new = ftrace_call_replace(ip, (unsigned long)func); - ret = ftrace_modify_code(ip, old, new); + ret = update_ftrace_func(ip, new); } - atomic_dec(&modifying_ftrace_code); - return ret; } static int is_ftrace_caller(unsigned long ip) { - if (ip == (unsigned long)(&ftrace_call) || - ip == (unsigned long)(&ftrace_regs_call)) + if (ip == ftrace_update_func) return 1; return 0; @@ -677,45 +692,41 @@ int __init ftrace_dyn_arch_init(void *data) #ifdef CONFIG_DYNAMIC_FTRACE extern void ftrace_graph_call(void); -static int ftrace_mod_jmp(unsigned long ip, - int old_offset, int new_offset) +static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr) { - unsigned char code[MCOUNT_INSN_SIZE]; + static union ftrace_code_union calc; - if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE)) - return -EFAULT; + /* Jmp not a call (ignore the .e8) */ + calc.e8 = 0xe9; + calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr); - if (code[0] != 0xe9 || old_offset != *(int *)(&code[1])) - return -EINVAL; + /* + * ftrace external locks synchronize the access to the static variable. + */ + return calc.code; +} - *(int *)(&code[1]) = new_offset; +static int ftrace_mod_jmp(unsigned long ip, void *func) +{ + unsigned char *new; - if (do_ftrace_mod_code(ip, &code)) - return -EPERM; + new = ftrace_jmp_replace(ip, (unsigned long)func); - return 0; + return update_ftrace_func(ip, new); } int ftrace_enable_ftrace_graph_caller(void) { unsigned long ip = (unsigned long)(&ftrace_graph_call); - int old_offset, new_offset; - old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE); - new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE); - - return ftrace_mod_jmp(ip, old_offset, new_offset); + return ftrace_mod_jmp(ip, &ftrace_graph_caller); } int ftrace_disable_ftrace_graph_caller(void) { unsigned long ip = (unsigned long)(&ftrace_graph_call); - int old_offset, new_offset; - - old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE); - new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE); - return ftrace_mod_jmp(ip, old_offset, new_offset); + return ftrace_mod_jmp(ip, &ftrace_stub); } #endif /* !CONFIG_DYNAMIC_FTRACE */ diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 19e5adb49a27..acb3b606613e 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -209,7 +209,7 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc) * dance when its actually needed. 
*/ - preempt_disable(); + preempt_disable_notrace(); data = this_cpu_read(cyc2ns.head); tail = this_cpu_read(cyc2ns.tail); @@ -229,7 +229,7 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc) if (!--data->__count) this_cpu_write(cyc2ns.tail, data); } - preempt_enable(); + preempt_enable_notrace(); return ns; } diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 9d591c895803..6dea040cc3a1 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1001,6 +1001,12 @@ static int fault_in_kernel_space(unsigned long address) static inline bool smap_violation(int error_code, struct pt_regs *regs) { + if (!IS_ENABLED(CONFIG_X86_SMAP)) + return false; + + if (!static_cpu_has(X86_FEATURE_SMAP)) + return false; + if (error_code & PF_USER) return false; @@ -1087,11 +1093,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code) if (unlikely(error_code & PF_RSVD)) pgtable_bad(regs, error_code, address); - if (static_cpu_has(X86_FEATURE_SMAP)) { - if (unlikely(smap_violation(error_code, regs))) { - bad_area_nosemaphore(regs, error_code, address); - return; - } + if (unlikely(smap_violation(error_code, regs))) { + bad_area_nosemaphore(regs, error_code, address); + return; } /* diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c index 4df9591eadad..f15103dff4b4 100644 --- a/arch/x86/platform/efi/efi-bgrt.c +++ b/arch/x86/platform/efi/efi-bgrt.c @@ -42,7 +42,7 @@ void __init efi_bgrt_init(void) if (bgrt_tab->header.length < sizeof(*bgrt_tab)) return; - if (bgrt_tab->version != 1) + if (bgrt_tab->version != 1 || bgrt_tab->status != 1) return; if (bgrt_tab->image_type != 0 || !bgrt_tab->image_address) return; diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index d62ec87a2b26..1a201ac7cef8 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -792,7 +792,7 @@ void __init efi_set_executable(efi_memory_desc_t *md, bool executable) set_memory_nx(addr, npages); } -static void __init runtime_code_page_mkexec(void) +void __init runtime_code_page_mkexec(void) { efi_memory_desc_t *md; void *p; @@ -1069,8 +1069,7 @@ void __init efi_enter_virtual_mode(void) efi.update_capsule = virt_efi_update_capsule; efi.query_capsule_caps = virt_efi_query_capsule_caps; - if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX)) - runtime_code_page_mkexec(); + efi_runtime_mkexec(); kfree(new_memmap); diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c index 249b183cf417..0b74cdf7f816 100644 --- a/arch/x86/platform/efi/efi_32.c +++ b/arch/x86/platform/efi/efi_32.c @@ -77,3 +77,9 @@ void efi_call_phys_epilog(void) local_irq_restore(efi_rt_eflags); } + +void __init efi_runtime_mkexec(void) +{ + if (__supported_pte_mask & _PAGE_NX) + runtime_code_page_mkexec(); +} diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 6284f158a47d..0c2a234fef1e 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -233,3 +233,12 @@ void __init parse_efi_setup(u64 phys_addr, u32 data_len) { efi_setup = phys_addr + sizeof(struct setup_data); } + +void __init efi_runtime_mkexec(void) +{ + if (!efi_enabled(EFI_OLD_MEMMAP)) + return; + + if (__supported_pte_mask & _PAGE_NX) + runtime_code_page_mkexec(); +} diff --git a/block/blk-core.c b/block/blk-core.c index c00e0bdeab4a..853f92749202 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -693,11 +693,20 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) if (!uninit_q) 
return NULL; + uninit_q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL); + if (!uninit_q->flush_rq) + goto out_cleanup_queue; + q = blk_init_allocated_queue(uninit_q, rfn, lock); if (!q) - blk_cleanup_queue(uninit_q); - + goto out_free_flush_rq; return q; + +out_free_flush_rq: + kfree(uninit_q->flush_rq); +out_cleanup_queue: + blk_cleanup_queue(uninit_q); + return NULL; } EXPORT_SYMBOL(blk_init_queue_node); @@ -1127,7 +1136,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw, struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) { if (q->mq_ops) - return blk_mq_alloc_request(q, rw, gfp_mask, false); + return blk_mq_alloc_request(q, rw, gfp_mask); else return blk_old_get_request(q, rw, gfp_mask); } @@ -1278,6 +1287,11 @@ void __blk_put_request(struct request_queue *q, struct request *req) if (unlikely(!q)) return; + if (q->mq_ops) { + blk_mq_free_request(req); + return; + } + blk_pm_put_request(req); elv_completed_request(q, req); diff --git a/block/blk-exec.c b/block/blk-exec.c index bbfc072a79c2..c68613bb4c79 100644 --- a/block/blk-exec.c +++ b/block/blk-exec.c @@ -65,7 +65,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, * be resued after dying flag is set */ if (q->mq_ops) { - blk_mq_insert_request(q, rq, true); + blk_mq_insert_request(q, rq, at_head, true); return; } diff --git a/block/blk-flush.c b/block/blk-flush.c index 9288aaf35c21..66e2b697f5db 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -130,20 +130,26 @@ static void blk_flush_restore_request(struct request *rq) blk_clear_rq_complete(rq); } -static void mq_flush_data_run(struct work_struct *work) +static void mq_flush_run(struct work_struct *work) { struct request *rq; - rq = container_of(work, struct request, mq_flush_data); + rq = container_of(work, struct request, mq_flush_work); memset(&rq->csd, 0, sizeof(rq->csd)); blk_mq_run_request(rq, true, false); } -static void blk_mq_flush_data_insert(struct request *rq) +static bool blk_flush_queue_rq(struct request *rq) { - INIT_WORK(&rq->mq_flush_data, mq_flush_data_run); - kblockd_schedule_work(rq->q, &rq->mq_flush_data); + if (rq->q->mq_ops) { + INIT_WORK(&rq->mq_flush_work, mq_flush_run); + kblockd_schedule_work(rq->q, &rq->mq_flush_work); + return false; + } else { + list_add_tail(&rq->queuelist, &rq->q->queue_head); + return true; + } } /** @@ -187,12 +193,7 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq, case REQ_FSEQ_DATA: list_move_tail(&rq->flush.list, &q->flush_data_in_flight); - if (q->mq_ops) - blk_mq_flush_data_insert(rq); - else { - list_add(&rq->queuelist, &q->queue_head); - queued = true; - } + queued = blk_flush_queue_rq(rq); break; case REQ_FSEQ_DONE: @@ -216,9 +217,6 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq, } kicked = blk_kick_flush(q); - /* blk_mq_run_flush will run queue */ - if (q->mq_ops) - return queued; return kicked | queued; } @@ -230,10 +228,9 @@ static void flush_end_io(struct request *flush_rq, int error) struct request *rq, *n; unsigned long flags = 0; - if (q->mq_ops) { - blk_mq_free_request(flush_rq); + if (q->mq_ops) spin_lock_irqsave(&q->mq_flush_lock, flags); - } + running = &q->flush_queue[q->flush_running_idx]; BUG_ON(q->flush_pending_idx == q->flush_running_idx); @@ -263,49 +260,14 @@ static void flush_end_io(struct request *flush_rq, int error) * kblockd. 
*/ if (queued || q->flush_queue_delayed) { - if (!q->mq_ops) - blk_run_queue_async(q); - else - /* - * This can be optimized to only run queues with requests - * queued if necessary. - */ - blk_mq_run_queues(q, true); + WARN_ON(q->mq_ops); + blk_run_queue_async(q); } q->flush_queue_delayed = 0; if (q->mq_ops) spin_unlock_irqrestore(&q->mq_flush_lock, flags); } -static void mq_flush_work(struct work_struct *work) -{ - struct request_queue *q; - struct request *rq; - - q = container_of(work, struct request_queue, mq_flush_work); - - /* We don't need set REQ_FLUSH_SEQ, it's for consistency */ - rq = blk_mq_alloc_request(q, WRITE_FLUSH|REQ_FLUSH_SEQ, - __GFP_WAIT|GFP_ATOMIC, true); - rq->cmd_type = REQ_TYPE_FS; - rq->end_io = flush_end_io; - - blk_mq_run_request(rq, true, false); -} - -/* - * We can't directly use q->flush_rq, because it doesn't have tag and is not in - * hctx->rqs[]. so we must allocate a new request, since we can't sleep here, - * so offload the work to workqueue. - * - * Note: we assume a flush request finished in any hardware queue will flush - * the whole disk cache. - */ -static void mq_run_flush(struct request_queue *q) -{ - kblockd_schedule_work(q, &q->mq_flush_work); -} - /** * blk_kick_flush - consider issuing flush request * @q: request_queue being kicked @@ -340,19 +302,31 @@ static bool blk_kick_flush(struct request_queue *q) * different from running_idx, which means flush is in flight. */ q->flush_pending_idx ^= 1; + if (q->mq_ops) { - mq_run_flush(q); - return true; + struct blk_mq_ctx *ctx = first_rq->mq_ctx; + struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu); + + blk_mq_rq_init(hctx, q->flush_rq); + q->flush_rq->mq_ctx = ctx; + + /* + * Reuse the tag value from the fist waiting request, + * with blk-mq the tag is generated during request + * allocation and drivers can rely on it being inside + * the range they asked for. + */ + q->flush_rq->tag = first_rq->tag; + } else { + blk_rq_init(q, q->flush_rq); } - blk_rq_init(q, &q->flush_rq); - q->flush_rq.cmd_type = REQ_TYPE_FS; - q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ; - q->flush_rq.rq_disk = first_rq->rq_disk; - q->flush_rq.end_io = flush_end_io; + q->flush_rq->cmd_type = REQ_TYPE_FS; + q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ; + q->flush_rq->rq_disk = first_rq->rq_disk; + q->flush_rq->end_io = flush_end_io; - list_add_tail(&q->flush_rq.queuelist, &q->queue_head); - return true; + return blk_flush_queue_rq(q->flush_rq); } static void flush_data_end_io(struct request *rq, int error) @@ -558,5 +532,4 @@ EXPORT_SYMBOL(blkdev_issue_flush); void blk_mq_init_flush(struct request_queue *q) { spin_lock_init(&q->mq_flush_lock); - INIT_WORK(&q->mq_flush_work, mq_flush_work); } diff --git a/block/blk-lib.c b/block/blk-lib.c index 2da76c999ef3..97a733cf3d5f 100644 --- a/block/blk-lib.c +++ b/block/blk-lib.c @@ -119,6 +119,14 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, atomic_inc(&bb.done); submit_bio(type, bio); + + /* + * We can loop for a long time in here, if someone does + * full device discards (like mkfs). Be nice and allow + * us to schedule out to avoid softlocking if preempt + * is disabled. 
+ */ + cond_resched(); } blk_finish_plug(&plug); diff --git a/block/blk-merge.c b/block/blk-merge.c index 8f8adaa95466..6c583f9c5b65 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -21,6 +21,16 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, if (!bio) return 0; + /* + * This should probably be returning 0, but blk_add_request_payload() + * (Christoph!!!!) + */ + if (bio->bi_rw & REQ_DISCARD) + return 1; + + if (bio->bi_rw & REQ_WRITE_SAME) + return 1; + fbio = bio; cluster = blk_queue_cluster(q); seg_size = 0; @@ -161,30 +171,60 @@ new_segment: *bvprv = *bvec; } -/* - * map a request to scatterlist, return number of sg entries setup. Caller - * must make sure sg can hold rq->nr_phys_segments entries - */ -int blk_rq_map_sg(struct request_queue *q, struct request *rq, - struct scatterlist *sglist) +static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio, + struct scatterlist *sglist, + struct scatterlist **sg) { struct bio_vec bvec, bvprv = { NULL }; - struct req_iterator iter; - struct scatterlist *sg; + struct bvec_iter iter; int nsegs, cluster; nsegs = 0; cluster = blk_queue_cluster(q); - /* - * for each bio in rq - */ - sg = NULL; - rq_for_each_segment(bvec, rq, iter) { - __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg, - &nsegs, &cluster); - } /* segments in rq */ + if (bio->bi_rw & REQ_DISCARD) { + /* + * This is a hack - drivers should be neither modifying the + * biovec, nor relying on bi_vcnt - but because of + * blk_add_request_payload(), a discard bio may or may not have + * a payload we need to set up here (thank you Christoph) and + * bi_vcnt is really the only way of telling if we need to. + */ + + if (bio->bi_vcnt) + goto single_segment; + + return 0; + } + + if (bio->bi_rw & REQ_WRITE_SAME) { +single_segment: + *sg = sglist; + bvec = bio_iovec(bio); + sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset); + return 1; + } + + for_each_bio(bio) + bio_for_each_segment(bvec, bio, iter) + __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg, + &nsegs, &cluster); + return nsegs; +} + +/* + * map a request to scatterlist, return number of sg entries setup. 
Caller + * must make sure sg can hold rq->nr_phys_segments entries + */ +int blk_rq_map_sg(struct request_queue *q, struct request *rq, + struct scatterlist *sglist) +{ + struct scatterlist *sg = NULL; + int nsegs = 0; + + if (rq->bio) + nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg); if (unlikely(rq->cmd_flags & REQ_COPY_USER) && (blk_rq_bytes(rq) & q->dma_pad_mask)) { @@ -230,20 +270,13 @@ EXPORT_SYMBOL(blk_rq_map_sg); int blk_bio_map_sg(struct request_queue *q, struct bio *bio, struct scatterlist *sglist) { - struct bio_vec bvec, bvprv = { NULL }; - struct scatterlist *sg; - int nsegs, cluster; - struct bvec_iter iter; - - nsegs = 0; - cluster = blk_queue_cluster(q); - - sg = NULL; - bio_for_each_segment(bvec, bio, iter) { - __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg, - &nsegs, &cluster); - } /* segments in bio */ + struct scatterlist *sg = NULL; + int nsegs; + struct bio *next = bio->bi_next; + bio->bi_next = NULL; + nsegs = __blk_bios_map_sg(q, bio, sglist, &sg); + bio->bi_next = next; if (sg) sg_mark_end(sg); diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 5d70edc9855f..83ae96c51a27 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -184,7 +184,7 @@ void blk_mq_free_tags(struct blk_mq_tags *tags) ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page) { char *orig_page = page; - int cpu; + unsigned int cpu; if (!tags) return 0; diff --git a/block/blk-mq.c b/block/blk-mq.c index 57039fcd9c93..1fa9dd153fde 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -226,15 +226,14 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q, return rq; } -struct request *blk_mq_alloc_request(struct request_queue *q, int rw, - gfp_t gfp, bool reserved) +struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp) { struct request *rq; if (blk_mq_queue_enter(q)) return NULL; - rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved); + rq = blk_mq_alloc_request_pinned(q, rw, gfp, false); if (rq) blk_mq_put_ctx(rq->mq_ctx); return rq; @@ -258,7 +257,7 @@ EXPORT_SYMBOL(blk_mq_alloc_reserved_request); /* * Re-init and set pdu, if we have it */ -static void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq) +void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq) { blk_rq_init(hctx->queue, rq); @@ -305,7 +304,7 @@ static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error) bio_endio(bio, error); } -void blk_mq_complete_request(struct request *rq, int error) +void blk_mq_end_io(struct request *rq, int error) { struct bio *bio = rq->bio; unsigned int bytes = 0; @@ -330,48 +329,55 @@ void blk_mq_complete_request(struct request *rq, int error) else blk_mq_free_request(rq); } +EXPORT_SYMBOL(blk_mq_end_io); -void __blk_mq_end_io(struct request *rq, int error) -{ - if (!blk_mark_rq_complete(rq)) - blk_mq_complete_request(rq, error); -} - -static void blk_mq_end_io_remote(void *data) +static void __blk_mq_complete_request_remote(void *data) { struct request *rq = data; - __blk_mq_end_io(rq, rq->errors); + rq->q->softirq_done_fn(rq); } -/* - * End IO on this request on a multiqueue enabled driver. We'll either do - * it directly inline, or punt to a local IPI handler on the matching - * remote CPU. 
- */ -void blk_mq_end_io(struct request *rq, int error) +void __blk_mq_complete_request(struct request *rq) { struct blk_mq_ctx *ctx = rq->mq_ctx; int cpu; - if (!ctx->ipi_redirect) - return __blk_mq_end_io(rq, error); + if (!ctx->ipi_redirect) { + rq->q->softirq_done_fn(rq); + return; + } cpu = get_cpu(); if (cpu != ctx->cpu && cpu_online(ctx->cpu)) { - rq->errors = error; - rq->csd.func = blk_mq_end_io_remote; + rq->csd.func = __blk_mq_complete_request_remote; rq->csd.info = rq; rq->csd.flags = 0; __smp_call_function_single(ctx->cpu, &rq->csd, 0); } else { - __blk_mq_end_io(rq, error); + rq->q->softirq_done_fn(rq); } put_cpu(); } -EXPORT_SYMBOL(blk_mq_end_io); -static void blk_mq_start_request(struct request *rq) +/** + * blk_mq_complete_request - end I/O on a request + * @rq: the request being processed + * + * Description: + * Ends all I/O on a request. It does not handle partial completions. + * The actual completion happens out-of-order, through a IPI handler. + **/ +void blk_mq_complete_request(struct request *rq) +{ + if (unlikely(blk_should_fake_timeout(rq->q))) + return; + if (!blk_mark_rq_complete(rq)) + __blk_mq_complete_request(rq); +} +EXPORT_SYMBOL(blk_mq_complete_request); + +static void blk_mq_start_request(struct request *rq, bool last) { struct request_queue *q = rq->q; @@ -384,6 +390,25 @@ static void blk_mq_start_request(struct request *rq) */ rq->deadline = jiffies + q->rq_timeout; set_bit(REQ_ATOM_STARTED, &rq->atomic_flags); + + if (q->dma_drain_size && blk_rq_bytes(rq)) { + /* + * Make sure space for the drain appears. We know we can do + * this because max_hw_segments has been adjusted to be one + * fewer than the device can handle. + */ + rq->nr_phys_segments++; + } + + /* + * Flag the last request in the series so that drivers know when IO + * should be kicked off, if they don't do it on a per-request basis. + * + * Note: the flag isn't the only condition drivers should do kick off. + * If drive is busy, the last request might not have the bit set. + */ + if (last) + rq->cmd_flags |= REQ_END; } static void blk_mq_requeue_request(struct request *rq) @@ -392,6 +417,11 @@ static void blk_mq_requeue_request(struct request *rq) trace_block_rq_requeue(q, rq); clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags); + + rq->cmd_flags &= ~REQ_END; + + if (q->dma_drain_size && blk_rq_bytes(rq)) + rq->nr_phys_segments--; } struct blk_mq_timeout_data { @@ -559,19 +589,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) rq = list_first_entry(&rq_list, struct request, queuelist); list_del_init(&rq->queuelist); - blk_mq_start_request(rq); - /* - * Last request in the series. Flag it as such, this - * enables drivers to know when IO should be kicked off, - * if they don't do it on a per-request basis. - * - * Note: the flag isn't the only condition drivers - * should do kick off. If drive is busy, the last - * request might not have the bit set. 
- */ - if (list_empty(&rq_list)) - rq->cmd_flags |= REQ_END; + blk_mq_start_request(rq, list_empty(&rq_list)); ret = q->mq_ops->queue_rq(hctx, rq); switch (ret) { @@ -589,8 +608,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) break; default: pr_err("blk-mq: bad return on queue: %d\n", ret); - rq->errors = -EIO; case BLK_MQ_RQ_QUEUE_ERROR: + rq->errors = -EIO; blk_mq_end_io(rq, rq->errors); break; } @@ -693,13 +712,16 @@ static void blk_mq_work_fn(struct work_struct *work) } static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, - struct request *rq) + struct request *rq, bool at_head) { struct blk_mq_ctx *ctx = rq->mq_ctx; trace_block_rq_insert(hctx->queue, rq); - list_add_tail(&rq->queuelist, &ctx->rq_list); + if (at_head) + list_add(&rq->queuelist, &ctx->rq_list); + else + list_add_tail(&rq->queuelist, &ctx->rq_list); blk_mq_hctx_mark_pending(hctx, ctx); /* @@ -709,7 +731,7 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, } void blk_mq_insert_request(struct request_queue *q, struct request *rq, - bool run_queue) + bool at_head, bool run_queue) { struct blk_mq_hw_ctx *hctx; struct blk_mq_ctx *ctx, *current_ctx; @@ -728,7 +750,7 @@ void blk_mq_insert_request(struct request_queue *q, struct request *rq, rq->mq_ctx = ctx; } spin_lock(&ctx->lock); - __blk_mq_insert_request(hctx, rq); + __blk_mq_insert_request(hctx, rq, at_head); spin_unlock(&ctx->lock); blk_mq_put_ctx(current_ctx); @@ -760,7 +782,7 @@ void blk_mq_run_request(struct request *rq, bool run_queue, bool async) /* ctx->cpu might be offline */ spin_lock(&ctx->lock); - __blk_mq_insert_request(hctx, rq); + __blk_mq_insert_request(hctx, rq, false); spin_unlock(&ctx->lock); blk_mq_put_ctx(current_ctx); @@ -798,7 +820,7 @@ static void blk_mq_insert_requests(struct request_queue *q, rq = list_first_entry(list, struct request, queuelist); list_del_init(&rq->queuelist); rq->mq_ctx = ctx; - __blk_mq_insert_request(hctx, rq); + __blk_mq_insert_request(hctx, rq, false); } spin_unlock(&ctx->lock); @@ -888,6 +910,11 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) blk_queue_bounce(q, &bio); + if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { + bio_endio(bio, -EIO); + return; + } + if (use_plug && blk_attempt_plug_merge(q, bio, &request_count)) return; @@ -950,7 +977,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) __blk_mq_free_request(hctx, ctx, rq); else { blk_mq_bio_to_request(rq, bio); - __blk_mq_insert_request(hctx, rq); + __blk_mq_insert_request(hctx, rq, false); } spin_unlock(&ctx->lock); @@ -1309,15 +1336,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg, reg->queue_depth = BLK_MQ_MAX_DEPTH; } - /* - * Set aside a tag for flush requests. It will only be used while - * another flush request is in progress but outside the driver. 
- * - * TODO: only allocate if flushes are supported - */ - reg->queue_depth++; - reg->reserved_tags++; - if (reg->queue_depth < (reg->reserved_tags + BLK_MQ_TAG_MIN)) return ERR_PTR(-EINVAL); @@ -1360,17 +1378,27 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg, q->mq_ops = reg->ops; q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; + q->sg_reserved_size = INT_MAX; + blk_queue_make_request(q, blk_mq_make_request); blk_queue_rq_timed_out(q, reg->ops->timeout); if (reg->timeout) blk_queue_rq_timeout(q, reg->timeout); + if (reg->ops->complete) + blk_queue_softirq_done(q, reg->ops->complete); + blk_mq_init_flush(q); blk_mq_init_cpu_queues(q, reg->nr_hw_queues); - if (blk_mq_init_hw_queues(q, reg, driver_data)) + q->flush_rq = kzalloc(round_up(sizeof(struct request) + reg->cmd_size, + cache_line_size()), GFP_KERNEL); + if (!q->flush_rq) goto err_hw; + if (blk_mq_init_hw_queues(q, reg, driver_data)) + goto err_flush_rq; + blk_mq_map_swqueue(q); mutex_lock(&all_q_mutex); @@ -1378,6 +1406,9 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg, mutex_unlock(&all_q_mutex); return q; + +err_flush_rq: + kfree(q->flush_rq); err_hw: kfree(q->mq_map); err_map: diff --git a/block/blk-mq.h b/block/blk-mq.h index 5c3917984b00..ed0035cd458e 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -22,13 +22,13 @@ struct blk_mq_ctx { struct kobject kobj; }; -void __blk_mq_end_io(struct request *rq, int error); -void blk_mq_complete_request(struct request *rq, int error); +void __blk_mq_complete_request(struct request *rq); void blk_mq_run_request(struct request *rq, bool run_queue, bool async); void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); void blk_mq_init_flush(struct request_queue *q); void blk_mq_drain_queue(struct request_queue *q); void blk_mq_free_queue(struct request_queue *q); +void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq); /* * CPU hotplug helpers diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 8095c4a21fc0..7500f876dae4 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -549,6 +549,8 @@ static void blk_release_queue(struct kobject *kobj) if (q->mq_ops) blk_mq_free_queue(q); + kfree(q->flush_rq); + blk_trace_shutdown(q); bdi_destroy(&q->backing_dev_info); diff --git a/block/blk-timeout.c b/block/blk-timeout.c index bba81c9348e1..d96f7061c6fd 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c @@ -91,7 +91,7 @@ static void blk_rq_timed_out(struct request *req) case BLK_EH_HANDLED: /* Can we use req->errors here? 
*/ if (q->mq_ops) - blk_mq_complete_request(req, req->errors); + __blk_mq_complete_request(req); else __blk_complete_request(req); break; diff --git a/block/blk.h b/block/blk.h index c90e1d8f7a2b..d23b415b8a28 100644 --- a/block/blk.h +++ b/block/blk.h @@ -113,7 +113,7 @@ static inline struct request *__elv_next_request(struct request_queue *q) q->flush_queue_delayed = 1; return NULL; } - if (unlikely(blk_queue_dying(q)) || + if (unlikely(blk_queue_bypass(q)) || !q->elevator->type->ops.elevator_dispatch_fn(q, 0)) return NULL; } diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c index 0b6ae6eb5c4a..368f9ddb8480 100644 --- a/drivers/acpi/container.c +++ b/drivers/acpi/container.c @@ -79,9 +79,10 @@ static int container_device_attach(struct acpi_device *adev, ACPI_COMPANION_SET(dev, adev); dev->release = acpi_container_release; ret = device_register(dev); - if (ret) + if (ret) { + put_device(dev); return ret; - + } adev->driver_data = dev; return 1; } diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index c431c88faaff..e9b3081c4fe9 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c @@ -609,7 +609,7 @@ static int handle_eject_request(struct dock_station *ds, u32 event) static void dock_notify(struct dock_station *ds, u32 event) { acpi_handle handle = ds->handle; - struct acpi_device *ad; + struct acpi_device *adev = NULL; int surprise_removal = 0; /* @@ -632,7 +632,8 @@ static void dock_notify(struct dock_station *ds, u32 event) switch (event) { case ACPI_NOTIFY_BUS_CHECK: case ACPI_NOTIFY_DEVICE_CHECK: - if (!dock_in_progress(ds) && acpi_bus_get_device(handle, &ad)) { + acpi_bus_get_device(handle, &adev); + if (!dock_in_progress(ds) && !acpi_device_enumerated(adev)) { begin_dock(ds); dock(ds); if (!dock_present(ds)) { diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 20a7517bd339..52b8181ddafd 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -4126,12 +4126,14 @@ static int mv_platform_probe(struct platform_device *pdev) clk_prepare_enable(hpriv->port_clks[port]); sprintf(port_number, "port%d", port); - hpriv->port_phys[port] = devm_phy_get(&pdev->dev, port_number); + hpriv->port_phys[port] = devm_phy_optional_get(&pdev->dev, + port_number); if (IS_ERR(hpriv->port_phys[port])) { rc = PTR_ERR(hpriv->port_phys[port]); hpriv->port_phys[port] = NULL; - if ((rc != -EPROBE_DEFER) && (rc != -ENODEV)) - dev_warn(&pdev->dev, "error getting phy"); + if (rc != -EPROBE_DEFER) + dev_warn(&pdev->dev, "error getting phy %d", + rc); goto err; } else phy_power_on(hpriv->port_phys[port]); diff --git a/drivers/base/component.c b/drivers/base/component.c index c53efe6c6d8e..c4778995cd72 100644 --- a/drivers/base/component.c +++ b/drivers/base/component.c @@ -133,9 +133,16 @@ static int try_to_bring_up_master(struct master *master, goto out; } + if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) { + ret = -ENOMEM; + goto out; + } + /* Found all components */ ret = master->ops->bind(master->dev); if (ret < 0) { + devres_release_group(master->dev, NULL); + dev_info(master->dev, "master bind failed: %d\n", ret); master_remove_components(master); goto out; } @@ -166,6 +173,7 @@ static void take_down_master(struct master *master) { if (master->bound) { master->ops->unbind(master->dev); + devres_release_group(master->dev, NULL); master->bound = false; } diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c index 1e16cbd61da2..61d6d62cc0d3 100644 --- a/drivers/base/dma-buf.c +++ b/drivers/base/dma-buf.c @@ -616,36 +616,35 @@ static int 
dma_buf_describe(struct seq_file *s) if (ret) return ret; - seq_printf(s, "\nDma-buf Objects:\n"); - seq_printf(s, "\texp_name\tsize\tflags\tmode\tcount\n"); + seq_puts(s, "\nDma-buf Objects:\n"); + seq_puts(s, "size\tflags\tmode\tcount\texp_name\n"); list_for_each_entry(buf_obj, &db_list.head, list_node) { ret = mutex_lock_interruptible(&buf_obj->lock); if (ret) { - seq_printf(s, - "\tERROR locking buffer object: skipping\n"); + seq_puts(s, + "\tERROR locking buffer object: skipping\n"); continue; } - seq_printf(s, "\t"); - - seq_printf(s, "\t%s\t%08zu\t%08x\t%08x\t%08ld\n", - buf_obj->exp_name, buf_obj->size, + seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n", + buf_obj->size, buf_obj->file->f_flags, buf_obj->file->f_mode, - (long)(buf_obj->file->f_count.counter)); + (long)(buf_obj->file->f_count.counter), + buf_obj->exp_name); - seq_printf(s, "\t\tAttached Devices:\n"); + seq_puts(s, "\tAttached Devices:\n"); attach_count = 0; list_for_each_entry(attach_obj, &buf_obj->attachments, node) { - seq_printf(s, "\t\t"); + seq_puts(s, "\t"); - seq_printf(s, "%s\n", attach_obj->dev->init_name); + seq_printf(s, "%s\n", dev_name(attach_obj->dev)); attach_count++; } - seq_printf(s, "\n\t\tTotal %d devices attached\n", + seq_printf(s, "Total %d devices attached\n\n", attach_count); count++; diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 3107282a9741..091b9ea14feb 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -60,7 +60,9 @@ enum { NULL_IRQ_NONE = 0, NULL_IRQ_SOFTIRQ = 1, NULL_IRQ_TIMER = 2, +}; +enum { NULL_Q_BIO = 0, NULL_Q_RQ = 1, NULL_Q_MQ = 2, @@ -172,18 +174,20 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait) static void end_cmd(struct nullb_cmd *cmd) { - if (cmd->rq) { - if (queue_mode == NULL_Q_MQ) - blk_mq_end_io(cmd->rq, 0); - else { - INIT_LIST_HEAD(&cmd->rq->queuelist); - blk_end_request_all(cmd->rq, 0); - } - } else if (cmd->bio) + switch (queue_mode) { + case NULL_Q_MQ: + blk_mq_end_io(cmd->rq, 0); + return; + case NULL_Q_RQ: + INIT_LIST_HEAD(&cmd->rq->queuelist); + blk_end_request_all(cmd->rq, 0); + break; + case NULL_Q_BIO: bio_endio(cmd->bio, 0); + break; + } - if (queue_mode != NULL_Q_MQ) - free_cmd(cmd); + free_cmd(cmd); } static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer) @@ -195,6 +199,7 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer) cq = &per_cpu(completion_queues, smp_processor_id()); while ((entry = llist_del_all(&cq->list)) != NULL) { + entry = llist_reverse_order(entry); do { cmd = container_of(entry, struct nullb_cmd, ll_list); end_cmd(cmd); @@ -221,61 +226,31 @@ static void null_cmd_end_timer(struct nullb_cmd *cmd) static void null_softirq_done_fn(struct request *rq) { - blk_end_request_all(rq, 0); -} - -#ifdef CONFIG_SMP - -static void null_ipi_cmd_end_io(void *data) -{ - struct completion_queue *cq; - struct llist_node *entry, *next; - struct nullb_cmd *cmd; - - cq = &per_cpu(completion_queues, smp_processor_id()); - - entry = llist_del_all(&cq->list); - - while (entry) { - next = entry->next; - cmd = llist_entry(entry, struct nullb_cmd, ll_list); - end_cmd(cmd); - entry = next; - } -} - -static void null_cmd_end_ipi(struct nullb_cmd *cmd) -{ - struct call_single_data *data = &cmd->csd; - int cpu = get_cpu(); - struct completion_queue *cq = &per_cpu(completion_queues, cpu); - - cmd->ll_list.next = NULL; - - if (llist_add(&cmd->ll_list, &cq->list)) { - data->func = null_ipi_cmd_end_io; - data->flags = 0; - __smp_call_function_single(cpu, data, 0); 
- } - - put_cpu(); + end_cmd(rq->special); } -#endif /* CONFIG_SMP */ - static inline void null_handle_cmd(struct nullb_cmd *cmd) { /* Complete IO by inline, softirq or timer */ switch (irqmode) { - case NULL_IRQ_NONE: - end_cmd(cmd); - break; case NULL_IRQ_SOFTIRQ: -#ifdef CONFIG_SMP - null_cmd_end_ipi(cmd); -#else + switch (queue_mode) { + case NULL_Q_MQ: + blk_mq_complete_request(cmd->rq); + break; + case NULL_Q_RQ: + blk_complete_request(cmd->rq); + break; + case NULL_Q_BIO: + /* + * XXX: no proper submitting cpu information available. + */ + end_cmd(cmd); + break; + } + break; + case NULL_IRQ_NONE: end_cmd(cmd); -#endif break; case NULL_IRQ_TIMER: null_cmd_end_timer(cmd); @@ -411,6 +386,7 @@ static struct blk_mq_ops null_mq_ops = { .queue_rq = null_queue_rq, .map_queue = blk_mq_map_queue, .init_hctx = null_init_hctx, + .complete = null_softirq_done_fn, }; static struct blk_mq_reg null_mq_reg = { @@ -609,13 +585,6 @@ static int __init null_init(void) { unsigned int i; -#if !defined(CONFIG_SMP) - if (irqmode == NULL_IRQ_SOFTIRQ) { - pr_warn("null_blk: softirq completions not available.\n"); - pr_warn("null_blk: using direct completions.\n"); - irqmode = NULL_IRQ_NONE; - } -#endif if (bs > PAGE_SIZE) { pr_warn("null_blk: invalid block size\n"); pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 6a680d4de7f1..b1cb3f4c4db4 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -110,9 +110,9 @@ static int __virtblk_add_req(struct virtqueue *vq, return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC); } -static inline void virtblk_request_done(struct virtblk_req *vbr) +static inline void virtblk_request_done(struct request *req) { - struct request *req = vbr->req; + struct virtblk_req *vbr = req->special; int error = virtblk_result(vbr); if (req->cmd_type == REQ_TYPE_BLOCK_PC) { @@ -138,7 +138,7 @@ static void virtblk_done(struct virtqueue *vq) do { virtqueue_disable_cb(vq); while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) { - virtblk_request_done(vbr); + blk_mq_complete_request(vbr->req); req_done = true; } if (unlikely(virtqueue_is_broken(vq))) @@ -479,6 +479,7 @@ static struct blk_mq_ops virtio_mq_ops = { .map_queue = blk_mq_map_queue, .alloc_hctx = blk_mq_alloc_single_hw_queue, .free_hctx = blk_mq_free_single_hw_queue, + .complete = virtblk_request_done, }; static struct blk_mq_reg virtio_mq_reg = { diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 4b97b86da926..64c60edcdfbc 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -299,7 +299,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, BUG_ON(num != 0); } -static void unmap_purged_grants(struct work_struct *work) +void xen_blkbk_unmap_purged_grants(struct work_struct *work) { struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; @@ -375,7 +375,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif) pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean); - INIT_LIST_HEAD(&blkif->persistent_purge_list); + BUG_ON(!list_empty(&blkif->persistent_purge_list)); root = &blkif->persistent_gnts; purge_list: foreach_grant_safe(persistent_gnt, n, root, node) { @@ -420,7 +420,6 @@ finished: blkif->vbd.overflow_max_grants = 0; /* We can defer this work */ - INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants); 
schedule_work(&blkif->persistent_purge_work); pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total); return; @@ -625,9 +624,23 @@ purge_gnt_list: print_stats(blkif); } - /* Since we are shutting down remove all pages from the buffer */ - shrink_free_pagepool(blkif, 0 /* All */); + /* Drain pending purge work */ + flush_work(&blkif->persistent_purge_work); + if (log_stats) + print_stats(blkif); + + blkif->xenblkd = NULL; + xen_blkif_put(blkif); + + return 0; +} + +/* + * Remove persistent grants and empty the pool of free pages + */ +void xen_blkbk_free_caches(struct xen_blkif *blkif) +{ /* Free all persistent grant pages */ if (!RB_EMPTY_ROOT(&blkif->persistent_gnts)) free_persistent_gnts(blkif, &blkif->persistent_gnts, @@ -636,13 +649,8 @@ purge_gnt_list: BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts)); blkif->persistent_gnt_c = 0; - if (log_stats) - print_stats(blkif); - - blkif->xenblkd = NULL; - xen_blkif_put(blkif); - - return 0; + /* Since we are shutting down remove all pages from the buffer */ + shrink_free_pagepool(blkif, 0 /* All */); } /* @@ -838,7 +846,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req, struct grant_page **pages = pending_req->indirect_pages; struct xen_blkif *blkif = pending_req->blkif; int indirect_grefs, rc, n, nseg, i; - struct blkif_request_segment_aligned *segments = NULL; + struct blkif_request_segment *segments = NULL; nseg = pending_req->nr_pages; indirect_grefs = INDIRECT_PAGES(nseg); @@ -934,9 +942,7 @@ static void xen_blk_drain_io(struct xen_blkif *blkif) { atomic_set(&blkif->drain, 1); do { - /* The initial value is one, and one refcnt taken at the - * start of the xen_blkif_schedule thread. */ - if (atomic_read(&blkif->refcnt) <= 2) + if (atomic_read(&blkif->inflight) == 0) break; wait_for_completion_interruptible_timeout( &blkif->drain_complete, HZ); @@ -976,17 +982,30 @@ static void __end_block_io_op(struct pending_req *pending_req, int error) * the proper response on the ring. */ if (atomic_dec_and_test(&pending_req->pendcnt)) { - xen_blkbk_unmap(pending_req->blkif, + struct xen_blkif *blkif = pending_req->blkif; + + xen_blkbk_unmap(blkif, pending_req->segments, pending_req->nr_pages); - make_response(pending_req->blkif, pending_req->id, + make_response(blkif, pending_req->id, pending_req->operation, pending_req->status); - xen_blkif_put(pending_req->blkif); - if (atomic_read(&pending_req->blkif->refcnt) <= 2) { - if (atomic_read(&pending_req->blkif->drain)) - complete(&pending_req->blkif->drain_complete); + free_req(blkif, pending_req); + /* + * Make sure the request is freed before releasing blkif, + * or there could be a race between free_req and the + * cleanup done in xen_blkif_free during shutdown. + * + * NB: The fact that we might try to wake up pending_free_wq + * before drain_complete (in case there's a drain going on) + * it's not a problem with our current implementation + * because we can assure there's no thread waiting on + * pending_free_wq if there's a drain going on, but it has + * to be taken into account if the current model is changed. + */ + if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) { + complete(&blkif->drain_complete); } - free_req(pending_req->blkif, pending_req); + xen_blkif_put(blkif); } } @@ -1240,6 +1259,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, * below (in "!bio") if we are handling a BLKIF_OP_DISCARD. 
*/ xen_blkif_get(blkif); + atomic_inc(&blkif->inflight); for (i = 0; i < nseg; i++) { while ((bio == NULL) || diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index 8d8807563d99..be052773ad03 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h @@ -57,7 +57,7 @@ #define MAX_INDIRECT_SEGMENTS 256 #define SEGS_PER_INDIRECT_FRAME \ - (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned)) + (PAGE_SIZE/sizeof(struct blkif_request_segment)) #define MAX_INDIRECT_PAGES \ ((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME) #define INDIRECT_PAGES(_segs) \ @@ -278,6 +278,7 @@ struct xen_blkif { /* for barrier (drain) requests */ struct completion drain_complete; atomic_t drain; + atomic_t inflight; /* One thread per one blkif. */ struct task_struct *xenblkd; unsigned int waiting_reqs; @@ -376,6 +377,7 @@ int xen_blkif_xenbus_init(void); irqreturn_t xen_blkif_be_int(int irq, void *dev_id); int xen_blkif_schedule(void *arg); int xen_blkif_purge_persistent(void *arg); +void xen_blkbk_free_caches(struct xen_blkif *blkif); int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt, struct backend_info *be, int state); @@ -383,6 +385,7 @@ int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt, int xen_blkbk_barrier(struct xenbus_transaction xbt, struct backend_info *be, int state); struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be); +void xen_blkbk_unmap_purged_grants(struct work_struct *work); static inline void blkif_get_x86_32_req(struct blkif_request *dst, struct blkif_x86_32_request *src) diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index c2014a0aa206..9a547e6b6ebf 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -125,8 +125,11 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid) blkif->persistent_gnts.rb_node = NULL; spin_lock_init(&blkif->free_pages_lock); INIT_LIST_HEAD(&blkif->free_pages); + INIT_LIST_HEAD(&blkif->persistent_purge_list); blkif->free_pages_num = 0; atomic_set(&blkif->persistent_gnt_in_use, 0); + atomic_set(&blkif->inflight, 0); + INIT_WORK(&blkif->persistent_purge_work, xen_blkbk_unmap_purged_grants); INIT_LIST_HEAD(&blkif->pending_free); @@ -259,6 +262,17 @@ static void xen_blkif_free(struct xen_blkif *blkif) if (!atomic_dec_and_test(&blkif->refcnt)) BUG(); + /* Remove all persistent grants and the cache of ballooned pages. 
*/ + xen_blkbk_free_caches(blkif); + + /* Make sure everything is drained before shutting down */ + BUG_ON(blkif->persistent_gnt_c != 0); + BUG_ON(atomic_read(&blkif->persistent_gnt_in_use) != 0); + BUG_ON(blkif->free_pages_num != 0); + BUG_ON(!list_empty(&blkif->persistent_purge_list)); + BUG_ON(!list_empty(&blkif->free_pages)); + BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts)); + /* Check that there is no request in use */ list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) { list_del(&req->free_list); diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 8dcfb54f1603..efe1b4761735 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -162,7 +162,7 @@ static DEFINE_SPINLOCK(minor_lock); #define DEV_NAME "xvd" /* name in /dev */ #define SEGS_PER_INDIRECT_FRAME \ - (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned)) + (PAGE_SIZE/sizeof(struct blkif_request_segment)) #define INDIRECT_GREFS(_segs) \ ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME) @@ -393,7 +393,7 @@ static int blkif_queue_request(struct request *req) unsigned long id; unsigned int fsect, lsect; int i, ref, n; - struct blkif_request_segment_aligned *segments = NULL; + struct blkif_request_segment *segments = NULL; /* * Used to store if we are able to queue the request by just using @@ -550,7 +550,7 @@ static int blkif_queue_request(struct request *req) } else { n = i % SEGS_PER_INDIRECT_FRAME; segments[n] = - (struct blkif_request_segment_aligned) { + (struct blkif_request_segment) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; @@ -1904,13 +1904,16 @@ static void blkback_changed(struct xenbus_device *dev, case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: - case XenbusStateClosed: break; case XenbusStateConnected: blkfront_connect(info); break; + case XenbusStateClosed: + if (dev->state == XenbusStateClosed) + break; + /* Missed the backend's Closing state -- fallthrough */ case XenbusStateClosing: blkfront_closing(info); break; diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index fa3243d71c76..1386749b48ff 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -499,6 +499,7 @@ config RAW_DRIVER config MAX_RAW_DEVS int "Maximum number of RAW devices to support (1-65536)" depends on RAW_DRIVER + range 1 65536 default "256" help The maximum number of RAW devices that are supported. 
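Stepping back to the xen-blkback changes a few hunks above: the old code inferred "all requests finished" from the blkif refcount (refcnt <= 2), while the reworked code keeps an explicit in-flight counter that the drain path waits on, and only drops the count after the request has been freed. A minimal sketch of that counter-plus-completion pattern, with made-up names and detached from the driver (illustrative only, not code from this patch):

/*
 * Illustrative drain pattern: count submitted-but-not-completed requests
 * and let the final completion wake a waiting drainer.
 */
#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/jiffies.h>

struct drain_ctx {
	atomic_t inflight;		/* requests issued but not yet completed */
	atomic_t draining;		/* non-zero while a drain is in progress */
	struct completion done;		/* signalled when inflight reaches zero */
};

static void ctx_init(struct drain_ctx *ctx)
{
	atomic_set(&ctx->inflight, 0);
	atomic_set(&ctx->draining, 0);
	init_completion(&ctx->done);
}

static void ctx_submit(struct drain_ctx *ctx)
{
	atomic_inc(&ctx->inflight);
	/* ... issue the request to the backend ... */
}

static void ctx_complete(struct drain_ctx *ctx)
{
	/* finish and free the request first, then drop the count */
	if (atomic_dec_and_test(&ctx->inflight) && atomic_read(&ctx->draining))
		complete(&ctx->done);
}

static void ctx_drain(struct drain_ctx *ctx)
{
	atomic_set(&ctx->draining, 1);
	while (atomic_read(&ctx->inflight) != 0)
		wait_for_completion_interruptible_timeout(&ctx->done, HZ);
	atomic_set(&ctx->draining, 0);
}

The ordering in ctx_complete() mirrors the point the patch comment makes: the request must be freed before the reference on the owning object is dropped, otherwise cleanup can race with free_req() during shutdown.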
diff --git a/drivers/char/raw.c b/drivers/char/raw.c index f3223aac4df1..6e8d65e9b1d3 100644 --- a/drivers/char/raw.c +++ b/drivers/char/raw.c @@ -190,7 +190,7 @@ static int bind_get(int number, dev_t *dev) struct raw_device_data *rawdev; struct block_device *bdev; - if (number <= 0 || number >= MAX_RAW_MINORS) + if (number <= 0 || number >= max_raw_minors) return -EINVAL; rawdev = &raw_devices[number]; diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c index 974b2db2fe10..0595dc6c453e 100644 --- a/drivers/clocksource/bcm_kona_timer.c +++ b/drivers/clocksource/bcm_kona_timer.c @@ -99,31 +99,6 @@ kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw) return; } -static void __init kona_timers_init(struct device_node *node) -{ - u32 freq; - struct clk *external_clk; - - external_clk = of_clk_get_by_name(node, NULL); - - if (!IS_ERR(external_clk)) { - arch_timer_rate = clk_get_rate(external_clk); - clk_prepare_enable(external_clk); - } else if (!of_property_read_u32(node, "clock-frequency", &freq)) { - arch_timer_rate = freq; - } else { - panic("unable to determine clock-frequency"); - } - - /* Setup IRQ numbers */ - timers.tmr_irq = irq_of_parse_and_map(node, 0); - - /* Setup IO addresses */ - timers.tmr_regs = of_iomap(node, 0); - - kona_timer_disable_and_clear(timers.tmr_regs); -} - static int kona_timer_set_next_event(unsigned long clc, struct clock_event_device *unused) { @@ -198,7 +173,34 @@ static struct irqaction kona_timer_irq = { static void __init kona_timer_init(struct device_node *node) { - kona_timers_init(node); + u32 freq; + struct clk *external_clk; + + if (!of_device_is_available(node)) { + pr_info("Kona Timer v1 marked as disabled in device tree\n"); + return; + } + + external_clk = of_clk_get_by_name(node, NULL); + + if (!IS_ERR(external_clk)) { + arch_timer_rate = clk_get_rate(external_clk); + clk_prepare_enable(external_clk); + } else if (!of_property_read_u32(node, "clock-frequency", &freq)) { + arch_timer_rate = freq; + } else { + pr_err("Kona Timer v1 unable to determine clock-frequency"); + return; + } + + /* Setup IRQ numbers */ + timers.tmr_irq = irq_of_parse_and_map(node, 0); + + /* Setup IO addresses */ + timers.tmr_regs = of_iomap(node, 0); + + kona_timer_disable_and_clear(timers.tmr_regs); + kona_timer_clockevents_init(); setup_irq(timers.tmr_irq, &kona_timer_irq); kona_timer_set_next_event((arch_timer_rate / HZ), NULL); diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 79606f473f48..c788abf1c457 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -51,8 +51,6 @@ static inline int32_t div_fp(int32_t x, int32_t y) return div_s64((int64_t)x << FRAC_BITS, (int64_t)y); } -static u64 energy_divisor; - struct sample { int32_t core_pct_busy; u64 aperf; @@ -630,12 +628,10 @@ static void intel_pstate_timer_func(unsigned long __data) { struct cpudata *cpu = (struct cpudata *) __data; struct sample *sample; - u64 energy; intel_pstate_sample(cpu); sample = &cpu->samples[cpu->sample_ptr]; - rdmsrl(MSR_PKG_ENERGY_STATUS, energy); intel_pstate_adjust_busy_pstate(cpu); @@ -644,7 +640,6 @@ static void intel_pstate_timer_func(unsigned long __data) cpu->pstate.current_pstate, sample->mperf, sample->aperf, - div64_u64(energy, energy_divisor), sample->freq); intel_pstate_set_sample_time(cpu); @@ -926,7 +921,6 @@ static int __init intel_pstate_init(void) int cpu, rc = 0; const struct x86_cpu_id *id; struct cpu_defaults *cpu_info; - u64 units; if (no_load) return 
-ENODEV; @@ -960,9 +954,6 @@ static int __init intel_pstate_init(void) if (rc) goto out; - rdmsrl(MSR_RAPL_POWER_UNIT, units); - energy_divisor = 1 << ((units >> 8) & 0x1f); /* bits{12:8} */ - intel_pstate_debug_expose_params(); intel_pstate_sysfs_expose_params(); diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c index 6c4c000671c5..1e5481d88a26 100644 --- a/drivers/crypto/nx/nx-842.c +++ b/drivers/crypto/nx/nx-842.c @@ -158,6 +158,15 @@ static inline unsigned long nx842_get_scatterlist_size( return sl->entry_nr * sizeof(struct nx842_slentry); } +static inline unsigned long nx842_get_pa(void *addr) +{ + if (is_vmalloc_addr(addr)) + return page_to_phys(vmalloc_to_page(addr)) + + offset_in_page(addr); + else + return __pa(addr); +} + static int nx842_build_scatterlist(unsigned long buf, int len, struct nx842_scatterlist *sl) { @@ -168,7 +177,7 @@ static int nx842_build_scatterlist(unsigned long buf, int len, entry = sl->entries; while (len) { - entry->ptr = __pa(buf); + entry->ptr = nx842_get_pa((void *)buf); nextpage = ALIGN(buf + 1, NX842_HW_PAGE_SIZE); if (nextpage < buf + len) { /* we aren't at the end yet */ @@ -370,8 +379,8 @@ int nx842_compress(const unsigned char *in, unsigned int inlen, op.flags = NX842_OP_COMPRESS; csbcpb = &workmem->csbcpb; memset(csbcpb, 0, sizeof(*csbcpb)); - op.csbcpb = __pa(csbcpb); - op.out = __pa(slout.entries); + op.csbcpb = nx842_get_pa(csbcpb); + op.out = nx842_get_pa(slout.entries); for (i = 0; i < hdr->blocks_nr; i++) { /* @@ -401,13 +410,13 @@ int nx842_compress(const unsigned char *in, unsigned int inlen, */ if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) { /* Create direct DDE */ - op.in = __pa(inbuf); + op.in = nx842_get_pa((void *)inbuf); op.inlen = max_sync_size; } else { /* Create indirect DDE (scatterlist) */ nx842_build_scatterlist(inbuf, max_sync_size, &slin); - op.in = __pa(slin.entries); + op.in = nx842_get_pa(slin.entries); op.inlen = -nx842_get_scatterlist_size(&slin); } @@ -565,7 +574,7 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen, op.flags = NX842_OP_DECOMPRESS; csbcpb = &workmem->csbcpb; memset(csbcpb, 0, sizeof(*csbcpb)); - op.csbcpb = __pa(csbcpb); + op.csbcpb = nx842_get_pa(csbcpb); /* * max_sync_size may have changed since compression, @@ -597,12 +606,12 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen, if (likely((inbuf & NX842_HW_PAGE_MASK) == ((inbuf + hdr->sizes[i] - 1) & NX842_HW_PAGE_MASK))) { /* Create direct DDE */ - op.in = __pa(inbuf); + op.in = nx842_get_pa((void *)inbuf); op.inlen = hdr->sizes[i]; } else { /* Create indirect DDE (scatterlist) */ nx842_build_scatterlist(inbuf, hdr->sizes[i] , &slin); - op.in = __pa(slin.entries); + op.in = nx842_get_pa(slin.entries); op.inlen = -nx842_get_scatterlist_size(&slin); } @@ -613,12 +622,12 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen, */ if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) { /* Create direct DDE */ - op.out = __pa(outbuf); + op.out = nx842_get_pa((void *)outbuf); op.outlen = max_sync_size; } else { /* Create indirect DDE (scatterlist) */ nx842_build_scatterlist(outbuf, max_sync_size, &slout); - op.out = __pa(slout.entries); + op.out = nx842_get_pa(slout.entries); op.outlen = -nx842_get_scatterlist_size(&slout); } diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 9bed1a2a67a1..605b016bcea4 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -346,6 +346,7 @@ config MOXART_DMA tristate "MOXART DMA support" depends on ARCH_MOXART select DMA_ENGINE + select DMA_OF 
select DMA_VIRTUAL_CHANNELS help Enable support for the MOXA ART SoC DMA controller. diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 53fb0c8365b0..766b68ed505c 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -497,8 +497,8 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) if (!mv_can_chain(grp_start)) goto submit_done; - dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n", - old_chain_tail->async_tx.phys); + dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n", + &old_chain_tail->async_tx.phys); /* fix up the hardware chain */ mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys); @@ -527,7 +527,8 @@ submit_done: /* returns the number of allocated descriptors */ static int mv_xor_alloc_chan_resources(struct dma_chan *chan) { - char *hw_desc; + void *virt_desc; + dma_addr_t dma_desc; int idx; struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); struct mv_xor_desc_slot *slot = NULL; @@ -542,17 +543,16 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan) " %d descriptor slots", idx); break; } - hw_desc = (char *) mv_chan->dma_desc_pool_virt; - slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE]; + virt_desc = mv_chan->dma_desc_pool_virt; + slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE; dma_async_tx_descriptor_init(&slot->async_tx, chan); slot->async_tx.tx_submit = mv_xor_tx_submit; INIT_LIST_HEAD(&slot->chain_node); INIT_LIST_HEAD(&slot->slot_node); INIT_LIST_HEAD(&slot->tx_list); - hw_desc = (char *) mv_chan->dma_desc_pool; - slot->async_tx.phys = - (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE]; + dma_desc = mv_chan->dma_desc_pool; + slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE; slot->idx = idx++; spin_lock_bh(&mv_chan->lock); @@ -582,8 +582,8 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, int slot_cnt; dev_dbg(mv_chan_to_devp(mv_chan), - "%s dest: %x src %x len: %u flags: %ld\n", - __func__, dest, src, len, flags); + "%s dest: %pad src %pad len: %u flags: %ld\n", + __func__, &dest, &src, len, flags); if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) return NULL; @@ -626,8 +626,8 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); dev_dbg(mv_chan_to_devp(mv_chan), - "%s src_cnt: %d len: dest %x %u flags: %ld\n", - __func__, src_cnt, len, dest, flags); + "%s src_cnt: %d len: %u dest %pad flags: %ld\n", + __func__, src_cnt, len, &dest, flags); spin_lock_bh(&mv_chan->lock); slot_cnt = mv_chan_xor_slot_count(len, src_cnt); diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index e8c9ef03495b..33edd6766344 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -559,7 +559,8 @@ static void edac_mc_workq_function(struct work_struct *work_req) * * called with the mem_ctls_mutex held */ -static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec) +static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec, + bool init) { edac_dbg(0, "\n"); @@ -567,7 +568,9 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec) if (mci->op_state != OP_RUNNING_POLL) return; - INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function); + if (init) + INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function); + mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec)); } @@ -601,7 +604,7 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci) * user space has updated our poll period value, need to * reset our workq delays */ -void 
edac_mc_reset_delay_period(int value) +void edac_mc_reset_delay_period(unsigned long value) { struct mem_ctl_info *mci; struct list_head *item; @@ -611,7 +614,7 @@ void edac_mc_reset_delay_period(int value) list_for_each(item, &mc_devices) { mci = list_entry(item, struct mem_ctl_info, link); - edac_mc_workq_setup(mci, (unsigned long) value); + edac_mc_workq_setup(mci, value, false); } mutex_unlock(&mem_ctls_mutex); @@ -782,7 +785,7 @@ int edac_mc_add_mc(struct mem_ctl_info *mci) /* This instance is NOW RUNNING */ mci->op_state = OP_RUNNING_POLL; - edac_mc_workq_setup(mci, edac_mc_get_poll_msec()); + edac_mc_workq_setup(mci, edac_mc_get_poll_msec(), true); } else { mci->op_state = OP_RUNNING_INTERRUPT; } diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index 8ec1747b1c39..b335c6ab5efe 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c @@ -52,18 +52,20 @@ int edac_mc_get_poll_msec(void) static int edac_set_poll_msec(const char *val, struct kernel_param *kp) { - long l; + unsigned long l; int ret; if (!val) return -EINVAL; - ret = kstrtol(val, 0, &l); + ret = kstrtoul(val, 0, &l); if (ret) return ret; - if (!l || ((int)l != l)) + + if (l < 1000) return -EINVAL; - *((int *)kp->arg) = l; + + *((unsigned long *)kp->arg) = l; /* notify edac_mc engine to reset the poll period */ edac_mc_reset_delay_period(l); diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h index 3d139c6e7fe3..f2118bfcf8df 100644 --- a/drivers/edac/edac_module.h +++ b/drivers/edac/edac_module.h @@ -52,7 +52,7 @@ extern void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev, extern void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev); extern void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev, unsigned long value); -extern void edac_mc_reset_delay_period(int value); +extern void edac_mc_reset_delay_period(unsigned long value); extern void *edac_align_ptr(void **p, unsigned size, int n_elems); diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 697338772b64..903f24d28ba0 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -403,6 +403,7 @@ config GPIO_GRGPIO config GPIO_TB10X bool + select GENERIC_IRQ_CHIP select OF_GPIO comment "I2C GPIO expanders:" diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c index 233d088ac59f..f32357e2d78d 100644 --- a/drivers/gpio/gpio-bcm-kona.c +++ b/drivers/gpio/gpio-bcm-kona.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012-2013 Broadcom Corporation + * Copyright (C) 2012-2014 Broadcom Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as @@ -657,6 +657,6 @@ static struct platform_driver bcm_kona_gpio_driver = { module_platform_driver(bcm_kona_gpio_driver); -MODULE_AUTHOR("Broadcom"); +MODULE_AUTHOR("Broadcom Corporation <bcm-kernel-feedback-list@broadcom.com>"); MODULE_DESCRIPTION("Broadcom Kona GPIO Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpio/gpio-clps711x.c b/drivers/gpio/gpio-clps711x.c index d3550274b8f7..3c2ba2ad0ada 100644 --- a/drivers/gpio/gpio-clps711x.c +++ b/drivers/gpio/gpio-clps711x.c @@ -97,3 +97,4 @@ module_platform_driver(clps711x_gpio_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>"); MODULE_DESCRIPTION("CLPS711X GPIO driver"); +MODULE_ALIAS("platform:clps711x-gpio"); diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c index d1b50ef5fab8..e585163f1ad5 100644 --- 
a/drivers/gpio/gpio-intel-mid.c +++ b/drivers/gpio/gpio-intel-mid.c @@ -394,8 +394,8 @@ static const struct irq_domain_ops intel_gpio_irq_ops = { static int intel_gpio_runtime_idle(struct device *dev) { - pm_schedule_suspend(dev, 500); - return -EBUSY; + int err = pm_schedule_suspend(dev, 500); + return err ?: -EBUSY; } static const struct dev_pm_ops intel_gpio_pm_ops = { diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c index 1d136eceda62..7081304d6797 100644 --- a/drivers/gpio/gpio-xtensa.c +++ b/drivers/gpio/gpio-xtensa.c @@ -40,6 +40,8 @@ #error GPIO32 option is not enabled for your xtensa core variant #endif +#if XCHAL_HAVE_CP + static inline unsigned long enable_cp(unsigned long *cpenable) { unsigned long flags; @@ -57,6 +59,20 @@ static inline void disable_cp(unsigned long flags, unsigned long cpenable) local_irq_restore(flags); } +#else + +static inline unsigned long enable_cp(unsigned long *cpenable) +{ + *cpenable = 0; /* avoid uninitialized value warning */ + return 0; +} + +static inline void disable_cp(unsigned long flags, unsigned long cpenable) +{ +} + +#endif /* XCHAL_HAVE_CP */ + static int xtensa_impwire_get_direction(struct gpio_chip *gc, unsigned offset) { return 1; /* input only */ diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index dffc836144cc..f4dc9b7a3831 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -296,6 +296,18 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv) case DRM_CAP_ASYNC_PAGE_FLIP: req->value = dev->mode_config.async_page_flip; break; + case DRM_CAP_CURSOR_WIDTH: + if (dev->mode_config.cursor_width) + req->value = dev->mode_config.cursor_width; + else + req->value = 64; + break; + case DRM_CAP_CURSOR_HEIGHT: + if (dev->mode_config.cursor_height) + req->value = dev->mode_config.cursor_height; + else + req->value = 64; + break; default: return -EINVAL; } diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index f227f544aa36..6e1a1a20cf6b 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -51,7 +51,7 @@ config DRM_EXYNOS_G2D config DRM_EXYNOS_IPP bool "Exynos DRM IPP" - depends on DRM_EXYNOS && !ARCH_MULTIPLATFORM + depends on DRM_EXYNOS help Choose this option if you want to use IPP feature for DRM. @@ -69,6 +69,6 @@ config DRM_EXYNOS_ROTATOR config DRM_EXYNOS_GSC bool "Exynos DRM GSC" - depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 + depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !ARCH_MULTIPLATFORM help Choose this option if you want to use Exynos GSC for DRM. 
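A couple of hunks above, drm_ioctl.c starts reporting DRM_CAP_CURSOR_WIDTH and DRM_CAP_CURSOR_HEIGHT, defaulting to 64 when a driver leaves mode_config.cursor_width/height unset. From userspace these caps would typically be read through libdrm's drmGetCap(); a rough usage sketch, assuming a libdrm/uapi recent enough to define the two caps (error handling trimmed, not part of this patch):

/*
 * Userspace sketch: query the kernel for the maximum hardware cursor size
 * and fall back to the historical 64x64 when the cap is not reported.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <xf86drm.h>

int main(void)
{
	uint64_t w = 64, h = 64;
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0)
		return 1;
	if (drmGetCap(fd, DRM_CAP_CURSOR_WIDTH, &w) != 0)
		w = 64;
	if (drmGetCap(fd, DRM_CAP_CURSOR_HEIGHT, &h) != 0)
		h = 64;
	printf("max hardware cursor: %llu x %llu\n",
	       (unsigned long long)w, (unsigned long long)h);
	close(fd);
	return 0;
}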
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 9d096a0c5f8d..215131ab1dd2 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -171,22 +171,24 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) file->driver_priv = file_priv; ret = exynos_drm_subdrv_open(dev, file); - if (ret) { - kfree(file_priv); - file->driver_priv = NULL; - } + if (ret) + goto out; anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops, NULL, 0); if (IS_ERR(anon_filp)) { - kfree(file_priv); - return PTR_ERR(anon_filp); + ret = PTR_ERR(anon_filp); + goto out; } anon_filp->f_mode = FMODE_READ | FMODE_WRITE; file_priv->anon_filp = anon_filp; return ret; +out: + kfree(file_priv); + file->driver_priv = NULL; + return ret; } static void exynos_drm_preclose(struct drm_device *dev, diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 380aec28840b..6c1885eedfdf 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -607,7 +607,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset) reg_type = REG_TYPE_NONE; DRM_ERROR("Unknown register offset![%d]\n", reg_offset); break; - }; + } return reg_type; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c index d519a4e5fe40..09312b877470 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c +++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c @@ -16,7 +16,6 @@ #include <linux/types.h> #include <linux/clk.h> #include <linux/pm_runtime.h> -#include <plat/map-base.h> #include <drm/drmP.h> #include <drm/exynos_drm.h> @@ -826,7 +825,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node, DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e); /* - * quf == NULL condition means all event deletion. + * qbuf == NULL condition means all event deletion. * stop operations want to delete all event list. * another case delete only same buf id. 
*/ diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index a0e10aeb0e67..c021ddc1ffb4 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -34,6 +34,7 @@ #include <linux/io.h> #include <linux/of.h> #include <linux/of_gpio.h> +#include <linux/hdmi.h> #include <drm/exynos_drm.h> @@ -59,19 +60,6 @@ #define HDMI_AUI_VERSION 0x01 #define HDMI_AUI_LENGTH 0x0A -/* HDMI infoframe to configure HDMI out packet header, AUI and AVI */ -enum HDMI_PACKET_TYPE { - /* refer to Table 5-8 Packet Type in HDMI specification v1.4a */ - /* InfoFrame packet type */ - HDMI_PACKET_TYPE_INFOFRAME = 0x80, - /* Vendor-Specific InfoFrame */ - HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1, - /* Auxiliary Video information InfoFrame */ - HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2, - /* Audio information InfoFrame */ - HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4 -}; - enum hdmi_type { HDMI_TYPE13, HDMI_TYPE14, @@ -379,12 +367,6 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = { }, }; -struct hdmi_infoframe { - enum HDMI_PACKET_TYPE type; - u8 ver; - u8 len; -}; - static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id) { return readl(hdata->regs + reg_id); @@ -682,7 +664,7 @@ static u8 hdmi_chksum(struct hdmi_context *hdata, } static void hdmi_reg_infoframe(struct hdmi_context *hdata, - struct hdmi_infoframe *infoframe) + union hdmi_infoframe *infoframe) { u32 hdr_sum; u8 chksum; @@ -700,13 +682,15 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata, return; } - switch (infoframe->type) { - case HDMI_PACKET_TYPE_AVI: + switch (infoframe->any.type) { + case HDMI_INFOFRAME_TYPE_AVI: hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC); - hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type); - hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver); - hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len); - hdr_sum = infoframe->type + infoframe->ver + infoframe->len; + hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->any.type); + hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, + infoframe->any.version); + hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->any.length); + hdr_sum = infoframe->any.type + infoframe->any.version + + infoframe->any.length; /* Output format zero hardcoded ,RGB YBCR selection */ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 | @@ -722,18 +706,20 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata, hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic); chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1), - infoframe->len, hdr_sum); + infoframe->any.length, hdr_sum); DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum); hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum); break; - case HDMI_PACKET_TYPE_AUI: + case HDMI_INFOFRAME_TYPE_AUDIO: hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02); - hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type); - hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver); - hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len); - hdr_sum = infoframe->type + infoframe->ver + infoframe->len; + hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->any.type); + hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, + infoframe->any.version); + hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->any.length); + hdr_sum = infoframe->any.type + infoframe->any.version + + infoframe->any.length; chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1), - infoframe->len, hdr_sum); + infoframe->any.length, hdr_sum); DRM_DEBUG_KMS("AUI checksum = 0x%x\n", 
chksum); hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum); break; @@ -985,7 +971,7 @@ static void hdmi_conf_reset(struct hdmi_context *hdata) static void hdmi_conf_init(struct hdmi_context *hdata) { - struct hdmi_infoframe infoframe; + union hdmi_infoframe infoframe; /* disable HPD interrupts from HDMI IP block, use GPIO instead */ hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL | @@ -1021,14 +1007,14 @@ static void hdmi_conf_init(struct hdmi_context *hdata) hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02); hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04); } else { - infoframe.type = HDMI_PACKET_TYPE_AVI; - infoframe.ver = HDMI_AVI_VERSION; - infoframe.len = HDMI_AVI_LENGTH; + infoframe.any.type = HDMI_INFOFRAME_TYPE_AVI; + infoframe.any.version = HDMI_AVI_VERSION; + infoframe.any.length = HDMI_AVI_LENGTH; hdmi_reg_infoframe(hdata, &infoframe); - infoframe.type = HDMI_PACKET_TYPE_AUI; - infoframe.ver = HDMI_AUI_VERSION; - infoframe.len = HDMI_AUI_LENGTH; + infoframe.any.type = HDMI_INFOFRAME_TYPE_AUDIO; + infoframe.any.version = HDMI_AUI_VERSION; + infoframe.any.length = HDMI_AUI_LENGTH; hdmi_reg_infoframe(hdata, &infoframe); /* enable AVI packet every vsync, fixes purple line problem */ diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index 400b0c4a10fb..faa77f543a07 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -208,7 +208,7 @@ struct tda998x_priv { # define PLL_SERIAL_1_SRL_IZ(x) (((x) & 3) << 1) # define PLL_SERIAL_1_SRL_MAN_IZ (1 << 6) #define REG_PLL_SERIAL_2 REG(0x02, 0x01) /* read/write */ -# define PLL_SERIAL_2_SRL_NOSC(x) (((x) & 3) << 0) +# define PLL_SERIAL_2_SRL_NOSC(x) ((x) << 0) # define PLL_SERIAL_2_SRL_PR(x) (((x) & 0xf) << 4) #define REG_PLL_SERIAL_3 REG(0x02, 0x02) /* read/write */ # define PLL_SERIAL_3_SRL_CCIR (1 << 0) @@ -528,10 +528,10 @@ tda998x_write_aif(struct drm_encoder *encoder, struct tda998x_encoder_params *p) { uint8_t buf[PB(5) + 1]; + memset(buf, 0, sizeof(buf)); buf[HB(0)] = 0x84; buf[HB(1)] = 0x01; buf[HB(2)] = 10; - buf[PB(0)] = 0; buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */ buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */ buf[PB(4)] = p->audio_frame[4]; @@ -824,6 +824,11 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder, } div = 148500 / mode->clock; + if (div != 0) { + div--; + if (div > 3) + div = 3; + } /* mute the audio FIFO: */ reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO); @@ -913,7 +918,7 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder, if (priv->rev == TDA19988) { /* let incoming pixels fill the active space (if any) */ - reg_write(encoder, REG_ENABLE_SPACE, 0x01); + reg_write(encoder, REG_ENABLE_SPACE, 0x00); } /* must be last register set: */ @@ -1094,6 +1099,8 @@ tda998x_encoder_destroy(struct drm_encoder *encoder) { struct tda998x_priv *priv = to_tda998x_priv(encoder); drm_i2c_encoder_destroy(encoder); + if (priv->cec) + i2c_unregister_device(priv->cec); kfree(priv); } @@ -1142,8 +1149,12 @@ tda998x_encoder_init(struct i2c_client *client, priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1); priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5); - priv->current_page = 0; + priv->current_page = 0xff; priv->cec = i2c_new_dummy(client->adapter, 0x34); + if (!priv->cec) { + kfree(priv); + return -ENODEV; + } priv->dpms = DRM_MODE_DPMS_OFF; encoder_slave->slave_priv = priv; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4a2bf8e3f739..df77e20e3c3d 100644 --- 
a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1831,6 +1831,14 @@ struct drm_i915_file_private { /* Early gen2 have a totally busted CS tlb and require pinned batches. */ #define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) +/* + * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts + * even when in MSI mode. This results in spurious interrupt warnings if the + * legacy irq no. is shared with another device. The kernel then disables that + * interrupt source and so prevents the other device from working properly. + */ +#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) +#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte * rows, which changed the alignment requirements and fence programming. diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index d7fd2fd2f0a5..990cf8f43efd 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -146,7 +146,10 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e, va_list tmp; va_copy(tmp, args); - if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp))) + len = vsnprintf(NULL, 0, f, tmp); + va_end(tmp); + + if (!__i915_error_seek(e, len)) return; } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 17d8fcb1b6f7..9fec71175571 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -567,8 +567,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) vbl_start = mode->crtc_vblank_start * mode->crtc_htotal; } else { - enum transcoder cpu_transcoder = - intel_pipe_to_cpu_transcoder(dev_priv, pipe); + enum transcoder cpu_transcoder = (enum transcoder) pipe; u32 htotal; htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 9fa24347963a..4c1672809493 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -8586,6 +8586,20 @@ static int intel_gen7_queue_flip(struct drm_device *dev, if (ring->id == RCS) len += 6; + /* + * BSpec MI_DISPLAY_FLIP for IVB: + * "The full packet must be contained within the same cache line." + * + * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same + * cacheline, if we ever start emitting more commands before + * the MI_DISPLAY_FLIP we may need to first emit everything else, + * then do the cacheline alignment, and finally emit the + * MI_DISPLAY_FLIP. 
+ */ + ret = intel_ring_cacheline_align(ring); + if (ret) + goto err_unpin; + ret = intel_ring_begin(ring, len); if (ret) goto err_unpin; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 5ede4e8e290d..57552eb386b0 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -404,7 +404,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, int i, ret, recv_bytes; uint32_t status; int try, precharge, clock = 0; - bool has_aux_irq = true; + bool has_aux_irq = HAS_AUX_IRQ(dev); uint32_t timeout; /* dp aux is extremely sensitive to irq latency, hence request the @@ -537,6 +537,7 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp, uint8_t msg[20]; int msg_bytes; uint8_t ack; + int retry; if (WARN_ON(send_bytes > 16)) return -E2BIG; @@ -548,19 +549,21 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp, msg[3] = send_bytes - 1; memcpy(&msg[4], send, send_bytes); msg_bytes = send_bytes + 4; - for (;;) { + for (retry = 0; retry < 7; retry++) { ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1); if (ret < 0) return ret; ack >>= 4; if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) - break; + return send_bytes; else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) - udelay(100); + usleep_range(400, 500); else return -EIO; } - return send_bytes; + + DRM_ERROR("too many retries, giving up\n"); + return -EIO; } /* Write a single byte to the aux channel in native mode */ @@ -582,6 +585,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp, int reply_bytes; uint8_t ack; int ret; + int retry; if (WARN_ON(recv_bytes > 19)) return -E2BIG; @@ -595,7 +599,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp, msg_bytes = 4; reply_bytes = recv_bytes + 1; - for (;;) { + for (retry = 0; retry < 7; retry++) { ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, reply, reply_bytes); if (ret == 0) @@ -608,10 +612,13 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp, return ret - 1; } else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) - udelay(100); + usleep_range(400, 500); else return -EIO; } + + DRM_ERROR("too many retries, giving up\n"); + return -EIO; } static int @@ -1869,10 +1876,12 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder) mutex_unlock(&dev_priv->dpio_lock); - /* init power sequencer on this pipe and port */ - intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); - intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, - &power_seq); + if (is_edp(intel_dp)) { + /* init power sequencer on this pipe and port */ + intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); + intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, + &power_seq); + } intel_enable_dp(encoder); diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index b1dc33f47899..d33b61d0dd33 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -258,13 +258,6 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin) algo->data = bus; } -/* - * gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI - * mode. This results in spurious interrupt warnings if the legacy irq no. is - * shared with another device. The kernel then disables that interrupt source - * and so prevents the other device from working properly. 
- */ -#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) static int gmbus_wait_hw_status(struct drm_i915_private *dev_priv, u32 gmbus2_status, diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 4e960ec7419f..acde2945eb8a 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -226,6 +226,8 @@ struct opregion_asle { #define ACPI_DIGITAL_OUTPUT (3<<8) #define ACPI_LVDS_OUTPUT (4<<8) +#define MAX_DSLP 1500 + #ifdef CONFIG_ACPI static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) { @@ -260,10 +262,11 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) /* The spec says 2ms should be the default, but it's too small * for some machines. */ dslp = 50; - } else if (dslp > 500) { + } else if (dslp > MAX_DSLP) { /* Hey bios, trust must be earned. */ - WARN_ONCE(1, "excessive driver sleep timeout (DSPL) %u\n", dslp); - dslp = 500; + DRM_INFO_ONCE("ACPI BIOS requests an excessive sleep of %u ms, " + "using %u ms instead\n", dslp, MAX_DSLP); + dslp = MAX_DSLP; } /* The spec tells us to do this, but we are the only user... */ diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index b7f1742caf87..31b36c5ac894 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1653,6 +1653,27 @@ int intel_ring_begin(struct intel_ring_buffer *ring, return 0; } +/* Align the ring tail to a cacheline boundary */ +int intel_ring_cacheline_align(struct intel_ring_buffer *ring) +{ + int num_dwords = (64 - (ring->tail & 63)) / sizeof(uint32_t); + int ret; + + if (num_dwords == 0) + return 0; + + ret = intel_ring_begin(ring, num_dwords); + if (ret) + return ret; + + while (num_dwords--) + intel_ring_emit(ring, MI_NOOP); + + intel_ring_advance(ring); + + return 0; +} + void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno) { struct drm_i915_private *dev_priv = ring->dev->dev_private; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 71a73f4fe252..0b243ce33714 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -233,6 +233,7 @@ intel_write_status_page(struct intel_ring_buffer *ring, void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); +int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring); static inline void intel_ring_emit(struct intel_ring_buffer *ring, u32 data) { diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c index 1964f4f0d452..84c5b13b33c9 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c @@ -39,6 +39,7 @@ struct mdp4_crtc { spinlock_t lock; bool stale; uint32_t width, height; + uint32_t x, y; /* next cursor to scan-out: */ uint32_t next_iova; @@ -57,9 +58,16 @@ struct mdp4_crtc { #define PENDING_FLIP 0x2 atomic_t pending; - /* the fb that we currently hold a scanout ref to: */ + /* the fb that we logically (from PoV of KMS API) hold a ref + * to. 
Which we may not yet be scanning out (we may still + * be scanning out previous in case of page_flip while waiting + * for gpu rendering to complete: + */ struct drm_framebuffer *fb; + /* the fb that we currently hold a scanout ref to: */ + struct drm_framebuffer *scanout_fb; + /* for unref'ing framebuffers after scanout completes: */ struct drm_flip_work unref_fb_work; @@ -77,24 +85,73 @@ static struct mdp4_kms *get_kms(struct drm_crtc *crtc) return to_mdp4_kms(to_mdp_kms(priv->kms)); } -static void update_fb(struct drm_crtc *crtc, bool async, - struct drm_framebuffer *new_fb) +static void request_pending(struct drm_crtc *crtc, uint32_t pending) { struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - struct drm_framebuffer *old_fb = mdp4_crtc->fb; - if (old_fb) - drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb); + atomic_or(pending, &mdp4_crtc->pending); + mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank); +} + +static void crtc_flush(struct drm_crtc *crtc) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + uint32_t i, flush = 0; + + for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) { + struct drm_plane *plane = mdp4_crtc->planes[i]; + if (plane) { + enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); + flush |= pipe2flush(pipe_id); + } + } + flush |= ovlp2flush(mdp4_crtc->ovlp); + + DBG("%s: flush=%08x", mdp4_crtc->name, flush); + + mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush); +} + +static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct drm_framebuffer *old_fb = mdp4_crtc->fb; /* grab reference to incoming scanout fb: */ drm_framebuffer_reference(new_fb); mdp4_crtc->base.fb = new_fb; mdp4_crtc->fb = new_fb; - if (!async) { - /* enable vblank to pick up the old_fb */ - mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank); - } + if (old_fb) + drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb); +} + +/* unlike update_fb(), take a ref to the new scanout fb *before* updating + * plane, then call this. Needed to ensure we don't unref the buffer that + * is actually still being scanned out. + * + * Note that this whole thing goes away with atomic.. since we can defer + * calling into driver until rendering is done. + */ +static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + + /* flush updates, to make sure hw is updated to new scanout fb, + * so that we can safely queue unref to current fb (ie. next + * vblank we know hw is done w/ previous scanout_fb). 
+ */ + crtc_flush(crtc); + + if (mdp4_crtc->scanout_fb) + drm_flip_work_queue(&mdp4_crtc->unref_fb_work, + mdp4_crtc->scanout_fb); + + mdp4_crtc->scanout_fb = fb; + + /* enable vblank to complete flip: */ + request_pending(crtc, PENDING_FLIP); } /* if file!=NULL, this is preclose potential cancel-flip path */ @@ -120,34 +177,6 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) spin_unlock_irqrestore(&dev->event_lock, flags); } -static void crtc_flush(struct drm_crtc *crtc) -{ - struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - struct mdp4_kms *mdp4_kms = get_kms(crtc); - uint32_t i, flush = 0; - - for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) { - struct drm_plane *plane = mdp4_crtc->planes[i]; - if (plane) { - enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); - flush |= pipe2flush(pipe_id); - } - } - flush |= ovlp2flush(mdp4_crtc->ovlp); - - DBG("%s: flush=%08x", mdp4_crtc->name, flush); - - mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush); -} - -static void request_pending(struct drm_crtc *crtc, uint32_t pending) -{ - struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - - atomic_or(pending, &mdp4_crtc->pending); - mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank); -} - static void pageflip_cb(struct msm_fence_cb *cb) { struct mdp4_crtc *mdp4_crtc = @@ -158,11 +187,9 @@ static void pageflip_cb(struct msm_fence_cb *cb) if (!fb) return; + drm_framebuffer_reference(fb); mdp4_plane_set_scanout(mdp4_crtc->plane, fb); - crtc_flush(crtc); - - /* enable vblank to complete flip: */ - request_pending(crtc, PENDING_FLIP); + update_scanout(crtc, fb); } static void unref_fb_worker(struct drm_flip_work *work, void *val) @@ -320,6 +347,20 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc, mode->vsync_end, mode->vtotal, mode->type, mode->flags); + /* grab extra ref for update_scanout() */ + drm_framebuffer_reference(crtc->fb); + + ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb, + 0, 0, mode->hdisplay, mode->vdisplay, + x << 16, y << 16, + mode->hdisplay << 16, mode->vdisplay << 16); + if (ret) { + drm_framebuffer_unreference(crtc->fb); + dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n", + mdp4_crtc->name, ret); + return ret; + } + mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma), MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) | MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay)); @@ -341,24 +382,15 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc, mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1); - update_fb(crtc, false, crtc->fb); - - ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb, - 0, 0, mode->hdisplay, mode->vdisplay, - x << 16, y << 16, - mode->hdisplay << 16, mode->vdisplay << 16); - if (ret) { - dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n", - mdp4_crtc->name, ret); - return ret; - } - if (dma == DMA_E) { mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000); mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000); mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000); } + update_fb(crtc, crtc->fb); + update_scanout(crtc, crtc->fb); + return 0; } @@ -385,13 +417,24 @@ static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); struct drm_plane *plane = mdp4_crtc->plane; struct drm_display_mode *mode = &crtc->mode; + int ret; - update_fb(crtc, false, crtc->fb); + /* grab extra ref for update_scanout() */ + drm_framebuffer_reference(crtc->fb); - return mdp4_plane_mode_set(plane, crtc, crtc->fb, + ret = mdp4_plane_mode_set(plane, crtc, 
crtc->fb, 0, 0, mode->hdisplay, mode->vdisplay, x << 16, y << 16, mode->hdisplay << 16, mode->vdisplay << 16); + if (ret) { + drm_framebuffer_unreference(crtc->fb); + return ret; + } + + update_fb(crtc, crtc->fb); + update_scanout(crtc, crtc->fb); + + return 0; } static void mdp4_crtc_load_lut(struct drm_crtc *crtc) @@ -419,7 +462,7 @@ static int mdp4_crtc_page_flip(struct drm_crtc *crtc, mdp4_crtc->event = event; spin_unlock_irqrestore(&dev->event_lock, flags); - update_fb(crtc, true, new_fb); + update_fb(crtc, new_fb); return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb); } @@ -442,12 +485,12 @@ static int mdp4_crtc_set_property(struct drm_crtc *crtc, static void update_cursor(struct drm_crtc *crtc) { struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); enum mdp4_dma dma = mdp4_crtc->dma; unsigned long flags; spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags); if (mdp4_crtc->cursor.stale) { - struct mdp4_kms *mdp4_kms = get_kms(crtc); struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo; struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo; uint32_t iova = mdp4_crtc->cursor.next_iova; @@ -479,6 +522,11 @@ static void update_cursor(struct drm_crtc *crtc) mdp4_crtc->cursor.scanout_bo = next_bo; mdp4_crtc->cursor.stale = false; } + + mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma), + MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) | + MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y)); + spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags); } @@ -530,6 +578,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc, drm_gem_object_unreference_unlocked(old_bo); } + crtc_flush(crtc); request_pending(crtc, PENDING_CURSOR); return 0; @@ -542,12 +591,15 @@ fail: static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) { struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - struct mdp4_kms *mdp4_kms = get_kms(crtc); - enum mdp4_dma dma = mdp4_crtc->dma; + unsigned long flags; - mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma), - MDP4_DMA_CURSOR_POS_X(x) | - MDP4_DMA_CURSOR_POS_Y(y)); + spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags); + mdp4_crtc->cursor.x = x; + mdp4_crtc->cursor.y = y; + spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags); + + crtc_flush(crtc); + request_pending(crtc, PENDING_CURSOR); return 0; } @@ -713,6 +765,7 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev, crtc = &mdp4_crtc->base; mdp4_crtc->plane = plane; + mdp4_crtc->id = id; mdp4_crtc->ovlp = ovlp_id; mdp4_crtc->dma = dma_id; diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c index 2406027200ec..1e893dd13859 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c @@ -170,8 +170,8 @@ int mdp4_plane_mode_set(struct drm_plane *plane, MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h)); mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe), - MDP4_PIPE_SRC_XY_X(crtc_x) | - MDP4_PIPE_SRC_XY_Y(crtc_y)); + MDP4_PIPE_DST_XY_X(crtc_x) | + MDP4_PIPE_DST_XY_Y(crtc_y)); mdp4_plane_set_scanout(plane, fb); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 71a3b2345eb3..f2794021f086 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -296,6 +296,7 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc, x << 16, y << 16, mode->hdisplay << 16, mode->vdisplay << 16); if (ret) { + drm_framebuffer_unreference(crtc->fb); dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n", 
mdp5_crtc->name, ret); return ret; @@ -343,11 +344,15 @@ static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, 0, 0, mode->hdisplay, mode->vdisplay, x << 16, y << 16, mode->hdisplay << 16, mode->vdisplay << 16); + if (ret) { + drm_framebuffer_unreference(crtc->fb); + return ret; + } update_fb(crtc, crtc->fb); update_scanout(crtc, crtc->fb); - return ret; + return 0; } static void mdp5_crtc_load_lut(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index d8d60c969ac7..3da8264d3039 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -644,7 +644,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, fail: if (obj) - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_unreference(obj); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 5281d4bc37f7..5423e914e491 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -163,7 +163,7 @@ retry: /* if locking succeeded, pin bo: */ - ret = msm_gem_get_iova(&msm_obj->base, + ret = msm_gem_get_iova_locked(&msm_obj->base, submit->gpu->id, &iova); /* this would break the logic in the fail path.. there is no @@ -247,7 +247,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob /* For now, just map the entire thing. Eventually we probably * to do it page-by-page, w/ kmap() if not vmap()d.. */ - ptr = msm_gem_vaddr(&obj->base); + ptr = msm_gem_vaddr_locked(&obj->base); if (IS_ERR(ptr)) { ret = PTR_ERR(ptr); @@ -307,14 +307,12 @@ static void submit_cleanup(struct msm_gem_submit *submit, bool fail) { unsigned i; - mutex_lock(&submit->dev->struct_mutex); for (i = 0; i < submit->nr_bos; i++) { struct msm_gem_object *msm_obj = submit->bos[i].obj; submit_unlock_unpin_bo(submit, i); list_del_init(&msm_obj->submit_entry); drm_gem_object_unreference(&msm_obj->base); } - mutex_unlock(&submit->dev->struct_mutex); ww_acquire_fini(&submit->ticket); kfree(submit); @@ -342,6 +340,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, if (args->nr_cmds > MAX_CMDS) return -EINVAL; + mutex_lock(&dev->struct_mutex); + submit = submit_create(dev, gpu, args->nr_bos); if (!submit) { ret = -ENOMEM; @@ -410,5 +410,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, out: if (submit) submit_cleanup(submit, !!ret); + mutex_unlock(&dev->struct_mutex); return ret; } diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 4ebce8be489d..0cfe3f426ee4 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -298,8 +298,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, struct msm_drm_private *priv = dev->dev_private; int i, ret; - mutex_lock(&dev->struct_mutex); - submit->fence = ++priv->next_fence; gpu->submitted_fence = submit->fence; @@ -331,7 +329,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence); } hangcheck_timer_reset(gpu); - mutex_unlock(&dev->struct_mutex); return ret; } diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index e88145ba1bf5..d310c195bdfe 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile @@ -141,6 +141,7 @@ nouveau-y += core/subdev/mc/base.o nouveau-y += core/subdev/mc/nv04.o nouveau-y += core/subdev/mc/nv40.o nouveau-y += core/subdev/mc/nv44.o +nouveau-y += 
core/subdev/mc/nv4c.o nouveau-y += core/subdev/mc/nv50.o nouveau-y += core/subdev/mc/nv94.o nouveau-y += core/subdev/mc/nv98.o diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c index 1b653dd74a70..08b88591ed60 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c @@ -311,7 +311,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; - device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; @@ -334,7 +334,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; - device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; device->oclass[NVDEV_SUBDEV_FB ] = nv4e_fb_oclass; @@ -357,7 +357,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; - device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; @@ -380,7 +380,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; - device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; @@ -403,7 +403,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; - device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c index 940eaa5d8b9a..9ad722e4e087 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c @@ -1142,7 +1142,7 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head) if (conf != ~0) { if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) { u32 soff = (ffs(outp.or) - 1) * 0x08; - u32 ctrl = nv_rd32(priv, 0x610798 + soff); + u32 ctrl = nv_rd32(priv, 0x610794 + soff); u32 datarate; switch ((ctrl & 0x000f0000) >> 16) { diff --git 
a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c index 9a850fe19515..54c1b5b471cd 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c @@ -112,7 +112,7 @@ nve0_fifo_runlist_update(struct nve0_fifo_priv *priv, u32 engine) nv_wr32(priv, 0x002270, cur->addr >> 12); nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3)); - if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000)) + if (!nv_wait(priv, 0x002284 + (engine * 8), 0x00100000, 0x00000000)) nv_error(priv, "runlist %d update timeout\n", engine); mutex_unlock(&nv_subdev(priv)->mutex); } diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c index 30ed19c52e05..7a367c402978 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c @@ -539,7 +539,7 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old, ustatus &= ~0x04030000; } if (ustatus && display) { - nv_error("%s - TP%d:", name, i); + nv_error(priv, "%s - TP%d:", name, i); nouveau_bitfield_print(nv50_mpc_traps, ustatus); pr_cont("\n"); ustatus = 0; diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h index adc88b73d911..3c6738edd127 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h @@ -47,6 +47,7 @@ struct nouveau_mc_oclass { extern struct nouveau_oclass *nv04_mc_oclass; extern struct nouveau_oclass *nv40_mc_oclass; extern struct nouveau_oclass *nv44_mc_oclass; +extern struct nouveau_oclass *nv4c_mc_oclass; extern struct nouveau_oclass *nv50_mc_oclass; extern struct nouveau_oclass *nv94_mc_oclass; extern struct nouveau_oclass *nv98_mc_oclass; diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c index aa0fbbec7f08..ef0c9c4a8cc3 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c @@ -130,6 +130,10 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios) u16 pcir; int i; + /* there is no prom on nv4x IGP's */ + if (device->card_type == NV_40 && device->chipset >= 0x4c) + return; + /* enable access to rom */ if (device->card_type >= NV_50) pcireg = 0x088050; diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c index 9159a5ccee93..265d1253624a 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c @@ -36,7 +36,7 @@ nv1a_fb_oclass = &(struct nv04_fb_impl) { .fini = _nouveau_fb_fini, }, .base.memtype = nv04_fb_memtype_valid, - .base.ram = &nv10_ram_oclass, + .base.ram = &nv1a_ram_oclass, .tile.regions = 8, .tile.init = nv10_fb_tile_init, .tile.fini = nv10_fb_tile_fini, diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h index b0d5c31606c1..81a408e7d034 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h @@ -14,6 +14,7 @@ int nv04_mc_ctor(struct nouveau_object *, struct nouveau_object *, extern const struct nouveau_mc_intr nv04_mc_intr[]; int nv04_mc_init(struct nouveau_object *); void nv40_mc_msi_rearm(struct nouveau_mc *); +int nv44_mc_init(struct nouveau_object *object); int nv50_mc_init(struct nouveau_object *); extern const struct nouveau_mc_intr nv50_mc_intr[]; extern 
const struct nouveau_mc_intr nvc0_mc_intr[]; diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c index 3bfee5c6c4f2..cc4d0d2d886e 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c @@ -24,7 +24,7 @@ #include "nv04.h" -static int +int nv44_mc_init(struct nouveau_object *object) { struct nv04_mc_priv *priv = (void *)object; diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c new file mode 100644 index 000000000000..a75c35ccf25c --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c @@ -0,0 +1,45 @@ +/* + * Copyright 2014 Ilia Mirkin + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ilia Mirkin + */ + +#include "nv04.h" + +static void +nv4c_mc_msi_rearm(struct nouveau_mc *pmc) +{ + struct nv04_mc_priv *priv = (void *)pmc; + nv_wr08(priv, 0x088050, 0xff); +} + +struct nouveau_oclass * +nv4c_mc_oclass = &(struct nouveau_mc_oclass) { + .base.handle = NV_SUBDEV(MC, 0x4c), + .base.ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_mc_ctor, + .dtor = _nouveau_mc_dtor, + .init = nv44_mc_init, + .fini = _nouveau_mc_fini, + }, + .intr = nv04_mc_intr, + .msi_rearm = nv4c_mc_msi_rearm, +}.base; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 488686d490c0..4aed1714b9ab 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1249,7 +1249,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) mem->bus.is_iomem = !dev->agp->cant_use_aperture; } #endif - if (!node->memtype) + if (nv_device(drm->device)->card_type < NV_50 || !node->memtype) /* untiled */ break; /* fallthrough, tiled memory */ diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 78c8e7146d56..89c484d8ac26 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -376,6 +376,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) if (ret) goto fail_device; + dev->irq_enabled = true; + /* workaround an odd issue on nvc1 by disabling the device's * nosnoop capability. hopefully won't cause issues until a * better fix is found - assuming there is one... 
@@ -475,6 +477,7 @@ nouveau_drm_remove(struct pci_dev *pdev) struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_object *device; + dev->irq_enabled = false; device = drm->client.base.device; drm_put_dev(dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c index 81638d7f2eff..471347edc27e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vga.c +++ b/drivers/gpu/drm/nouveau/nouveau_vga.c @@ -14,7 +14,9 @@ nouveau_vga_set_decode(void *priv, bool state) { struct nouveau_device *device = nouveau_dev(priv); - if (device->chipset >= 0x40) + if (device->card_type == NV_40 && device->chipset >= 0x4c) + nv_wr32(device, 0x088060, state); + else if (device->chipset >= 0x40) nv_wr32(device, 0x088054, state); else nv_wr32(device, 0x001854, state); diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index a9338c85630f..0d19f4f94d5a 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -559,7 +559,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, u32 adjusted_clock = mode->clock; int encoder_mode = atombios_get_encoder_mode(encoder); u32 dp_clock = mode->clock; - int bpc = radeon_get_monitor_bpc(connector); + int bpc = radeon_crtc->bpc; bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock); /* reset the pll flags */ @@ -1176,7 +1176,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); /* Set NUM_BANKS. */ - if (rdev->family >= CHIP_BONAIRE) { + if (rdev->family >= CHIP_TAHITI) { unsigned tileb, index, num_banks, tile_split_bytes; /* Calculate the macrotile mode index. */ @@ -1194,13 +1194,14 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, return -EINVAL; } - num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; + if (rdev->family >= CHIP_BONAIRE) + num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; + else + num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3; fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks); } else { - /* SI and older. */ - if (rdev->family >= CHIP_TAHITI) - tmp = rdev->config.si.tile_config; - else if (rdev->family >= CHIP_CAYMAN) + /* NI and older. 
*/ + if (rdev->family >= CHIP_CAYMAN) tmp = rdev->config.cayman.tile_config; else tmp = rdev->config.evergreen.tile_config; diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index a42d61571f49..2cec2ab02f80 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -464,11 +464,12 @@ atombios_tv_setup(struct drm_encoder *encoder, int action) static u8 radeon_atom_get_bpc(struct drm_encoder *encoder) { - struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); int bpc = 8; - if (connector) - bpc = radeon_get_monitor_bpc(connector); + if (encoder->crtc) { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); + bpc = radeon_crtc->bpc; + } switch (bpc) { case 0: diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c index 0fbd36f3d4e9..ea103ccdf4bd 100644 --- a/drivers/gpu/drm/radeon/btc_dpm.c +++ b/drivers/gpu/drm/radeon/btc_dpm.c @@ -29,6 +29,7 @@ #include "cypress_dpm.h" #include "btc_dpm.h" #include "atom.h" +#include <linux/seq_file.h> #define MC_CG_ARB_FREQ_F0 0x0a #define MC_CG_ARB_FREQ_F1 0x0b @@ -2756,6 +2757,37 @@ void btc_dpm_fini(struct radeon_device *rdev) r600_free_extended_power_table(rdev); } +void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, + struct seq_file *m) +{ + struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); + struct radeon_ps *rps = &eg_pi->current_rps; + struct rv7xx_ps *ps = rv770_get_ps(rps); + struct rv7xx_pl *pl; + u32 current_index = + (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >> + CURRENT_PROFILE_INDEX_SHIFT; + + if (current_index > 2) { + seq_printf(m, "invalid dpm profile %d\n", current_index); + } else { + if (current_index == 0) + pl = &ps->low; + else if (current_index == 1) + pl = &ps->medium; + else /* current_index == 2 */ + pl = &ps->high; + seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); + if (rdev->family >= CHIP_CEDAR) { + seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u\n", + current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci); + } else { + seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u\n", + current_index, pl->sclk, pl->mclk, pl->vddc); + } + } +} + u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low) { struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); diff --git a/drivers/gpu/drm/radeon/btcd.h b/drivers/gpu/drm/radeon/btcd.h index 29e32de7e025..9c65be2d55a9 100644 --- a/drivers/gpu/drm/radeon/btcd.h +++ b/drivers/gpu/drm/radeon/btcd.h @@ -44,6 +44,10 @@ # define DYN_SPREAD_SPECTRUM_EN (1 << 23) # define AC_DC_SW (1 << 24) +#define TARGET_AND_CURRENT_PROFILE_INDEX 0x66c +# define CURRENT_PROFILE_INDEX_MASK (0xf << 4) +# define CURRENT_PROFILE_INDEX_SHIFT 4 + #define CG_BIF_REQ_AND_RSP 0x7f4 #define CG_CLIENT_REQ(x) ((x) << 0) #define CG_CLIENT_REQ_MASK (0xff << 0) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index f2b9e21ce4da..5623e7542d99 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1680,7 +1680,7 @@ bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) case RADEON_HPD_6: if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE) connected = true; - break; + break; default: break; } diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index b6e01d5d2cce..351db361239d 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ 
b/drivers/gpu/drm/radeon/kv_dpm.c @@ -1223,7 +1223,7 @@ int kv_dpm_enable(struct radeon_device *rdev) int kv_dpm_late_enable(struct radeon_device *rdev) { - int ret; + int ret = 0; if (rdev->irq.installed && r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index c351226ecb31..ca814276b075 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c @@ -2588,7 +2588,7 @@ static int ni_populate_sq_ramping_values(struct radeon_device *rdev, if (NISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT)) enable_sq_ramping = false; - if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) + if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) enable_sq_ramping = false; for (i = 0; i < state->performance_level_count; i++) { @@ -3945,7 +3945,6 @@ static void ni_parse_pplib_clock_info(struct radeon_device *rdev, struct rv7xx_power_info *pi = rv770_get_pi(rdev); struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); struct ni_ps *ps = ni_get_ps(rps); - u16 vddc; struct rv7xx_pl *pl = &ps->performance_levels[index]; ps->performance_level_count = index + 1; @@ -3961,8 +3960,8 @@ static void ni_parse_pplib_clock_info(struct radeon_device *rdev, /* patch up vddc if necessary */ if (pl->vddc == 0xff01) { - if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0) - pl->vddc = vddc; + if (pi->max_vddc) + pl->vddc = pi->max_vddc; } if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { @@ -4322,7 +4321,8 @@ void ni_dpm_print_power_state(struct radeon_device *rdev, void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, struct seq_file *m) { - struct radeon_ps *rps = rdev->pm.dpm.current_ps; + struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); + struct radeon_ps *rps = &eg_pi->current_rps; struct ni_ps *ps = ni_get_ps(rps); struct rv7xx_pl *pl; u32 current_index = diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 56140b4e5bb2..cdbc4171fe73 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -3991,6 +3991,10 @@ restart_ih: break; } break; + case 124: /* UVD */ + DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); + radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); + break; case 176: /* CP_INT in ring buffer */ case 177: /* CP_INT in IB1 */ case 178: /* CP_INT in IB2 */ diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 4a8ac1cd6b4c..024db37b1832 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -135,6 +135,9 @@ extern int radeon_hard_reset; /* R600+ */ #define R600_RING_TYPE_UVD_INDEX 5 +/* number of hw syncs before falling back on blocking */ +#define RADEON_NUM_SYNCS 4 + /* hardcode those limit for now */ #define RADEON_VA_IB_OFFSET (1 << 20) #define RADEON_VA_RESERVED_SIZE (8 << 20) @@ -554,7 +557,6 @@ int radeon_mode_dumb_mmap(struct drm_file *filp, /* * Semaphores. 
*/ -/* everything here is constant */ struct radeon_semaphore { struct radeon_sa_bo *sa_bo; signed waiters; diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index f74db43346fd..dda02bfc10a4 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -1555,7 +1555,7 @@ static struct radeon_asic btc_asic = { .get_sclk = &btc_dpm_get_sclk, .get_mclk = &btc_dpm_get_mclk, .print_power_state = &rv770_dpm_print_power_state, - .debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level, + .debugfs_print_current_performance_level = &btc_dpm_debugfs_print_current_performance_level, .force_performance_level = &rv770_dpm_force_performance_level, .vblank_too_short = &btc_dpm_vblank_too_short, }, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index b3bc433eed4c..ae637cfda783 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -551,6 +551,8 @@ void btc_dpm_fini(struct radeon_device *rdev); u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low); u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low); bool btc_dpm_vblank_too_short(struct radeon_device *rdev); +void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, + struct seq_file *m); int sumo_dpm_init(struct radeon_device *rdev); int sumo_dpm_enable(struct radeon_device *rdev); int sumo_dpm_late_enable(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index d680608f6f5b..fbd8b930f2be 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -571,6 +571,8 @@ static void radeon_crtc_init(struct drm_device *dev, int index) radeon_crtc->max_cursor_width = CURSOR_WIDTH; radeon_crtc->max_cursor_height = CURSOR_HEIGHT; } + dev->mode_config.cursor_width = radeon_crtc->max_cursor_width; + dev->mode_config.cursor_height = radeon_crtc->max_cursor_height; #if 0 radeon_crtc->mode_set.crtc = &radeon_crtc->base; diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 1b783f0e6d3a..15e44a7281ab 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -139,7 +139,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, } /* 64 dwords should be enough for fence too */ - r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8); + r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8); if (r) { dev_err(rdev->dev, "scheduling IB failed (%d).\n", r); return r; diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c index 2b42aa1914f2..9006b32d5eed 100644 --- a/drivers/gpu/drm/radeon/radeon_semaphore.c +++ b/drivers/gpu/drm/radeon/radeon_semaphore.c @@ -34,14 +34,15 @@ int radeon_semaphore_create(struct radeon_device *rdev, struct radeon_semaphore **semaphore) { + uint32_t *cpu_addr; int i, r; *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL); if (*semaphore == NULL) { return -ENOMEM; } - r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, - &(*semaphore)->sa_bo, 8, 8, true); + r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo, + 8 * RADEON_NUM_SYNCS, 8, true); if (r) { kfree(*semaphore); *semaphore = NULL; @@ -49,7 +50,10 @@ int radeon_semaphore_create(struct radeon_device *rdev, } (*semaphore)->waiters = 0; (*semaphore)->gpu_addr = 
radeon_sa_bo_gpu_addr((*semaphore)->sa_bo); - *((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0; + + cpu_addr = radeon_sa_bo_cpu_addr((*semaphore)->sa_bo); + for (i = 0; i < RADEON_NUM_SYNCS; ++i) + cpu_addr[i] = 0; for (i = 0; i < RADEON_NUM_RINGS; ++i) (*semaphore)->sync_to[i] = NULL; @@ -125,6 +129,7 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev, struct radeon_semaphore *semaphore, int ring) { + unsigned count = 0; int i, r; for (i = 0; i < RADEON_NUM_RINGS; ++i) { @@ -140,6 +145,12 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev, return -EINVAL; } + if (++count > RADEON_NUM_SYNCS) { + /* not enough room, wait manually */ + radeon_fence_wait_locked(fence); + continue; + } + /* allocate enough space for sync command */ r = radeon_ring_alloc(rdev, &rdev->ring[i], 16); if (r) { @@ -164,6 +175,8 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev, radeon_ring_commit(rdev, &rdev->ring[i]); radeon_fence_note_sync(fence, ring); + + semaphore->gpu_addr += 8; } return 0; diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index 80c595aba359..b5f63f5e22a3 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c @@ -2174,7 +2174,6 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev, struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); struct rv7xx_ps *ps = rv770_get_ps(rps); u32 sclk, mclk; - u16 vddc; struct rv7xx_pl *pl; switch (index) { @@ -2214,8 +2213,8 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev, /* patch up vddc if necessary */ if (pl->vddc == 0xff01) { - if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0) - pl->vddc = vddc; + if (pi->max_vddc) + pl->vddc = pi->max_vddc; } if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { @@ -2527,14 +2526,7 @@ u32 rv770_dpm_get_mclk(struct radeon_device *rdev, bool low) bool rv770_dpm_vblank_too_short(struct radeon_device *rdev) { u32 vblank_time = r600_dpm_get_vblank_time(rdev); - u32 switch_limit = 300; - - /* quirks */ - /* ASUS K70AF */ - if ((rdev->pdev->device == 0x9553) && - (rdev->pdev->subsystem_vendor == 0x1043) && - (rdev->pdev->subsystem_device == 0x1c42)) - switch_limit = 200; + u32 switch_limit = 200; /* 300 */ /* RV770 */ /* mclk switching doesn't seem to work reliably on desktop RV770s */ diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 09ec4f6c53bb..83578324e5d1 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -6338,6 +6338,10 @@ restart_ih: break; } break; + case 124: /* UVD */ + DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); + radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); + break; case 146: case 147: addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 0471501338fb..0a2f5b4bca43 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2395,7 +2395,7 @@ static int si_populate_sq_ramping_values(struct radeon_device *rdev, if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT)) enable_sq_ramping = false; - if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) + if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) enable_sq_ramping = false; for (i = 0; i < state->performance_level_count; i++) { @@ -6472,7 +6472,8 @@ void si_dpm_fini(struct radeon_device *rdev) void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, 
struct seq_file *m) { - struct radeon_ps *rps = rdev->pm.dpm.current_ps; + struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); + struct radeon_ps *rps = &eg_pi->current_rps; struct ni_ps *ps = ni_get_ps(rps); struct rv7xx_pl *pl; u32 current_index = diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c index f121efe12dc5..8b47b3cd0357 100644 --- a/drivers/gpu/drm/radeon/sumo_dpm.c +++ b/drivers/gpu/drm/radeon/sumo_dpm.c @@ -1807,7 +1807,7 @@ void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev struct seq_file *m) { struct sumo_power_info *pi = sumo_get_pi(rdev); - struct radeon_ps *rps = rdev->pm.dpm.current_ps; + struct radeon_ps *rps = &pi->current_rps; struct sumo_ps *ps = sumo_get_ps(rps); struct sumo_pl *pl; u32 current_index = diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c index 2d447192d6f7..2da0e17eb960 100644 --- a/drivers/gpu/drm/radeon/trinity_dpm.c +++ b/drivers/gpu/drm/radeon/trinity_dpm.c @@ -1926,7 +1926,8 @@ void trinity_dpm_print_power_state(struct radeon_device *rdev, void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, struct seq_file *m) { - struct radeon_ps *rps = rdev->pm.dpm.current_ps; + struct trinity_power_info *pi = trinity_get_pi(rdev); + struct radeon_ps *rps = &pi->current_rps; struct trinity_ps *ps = trinity_get_ps(rps); struct trinity_pl *pl; u32 current_index = diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c index 824550db3fed..d1771004cb52 100644 --- a/drivers/gpu/drm/radeon/uvd_v2_2.c +++ b/drivers/gpu/drm/radeon/uvd_v2_2.c @@ -57,7 +57,6 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev, radeon_ring_write(ring, 0); radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0)); radeon_ring_write(ring, 2); - return; } /** diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c index 3302f99e7497..764be36397fd 100644 --- a/drivers/gpu/drm/ttm/ttm_agp_backend.c +++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c @@ -126,6 +126,7 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev, agp_be->ttm.func = &ttm_agp_func; if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) { + kfree(agp_be); return NULL; } diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h index b645647b7776..bb594c11605e 100644 --- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h +++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h @@ -1223,9 +1223,19 @@ typedef enum { #define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129 #define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130 - +#define SVGA_3D_CMD_GB_SCREEN_DMA 1131 +#define SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH 1132 +#define SVGA_3D_CMD_GB_MOB_FENCE 1133 +#define SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 1134 #define SVGA_3D_CMD_DEFINE_GB_MOB64 1135 #define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136 +#define SVGA_3D_CMD_NOP_ERROR 1137 + +#define SVGA_3D_CMD_RESERVED1 1138 +#define SVGA_3D_CMD_RESERVED2 1139 +#define SVGA_3D_CMD_RESERVED3 1140 +#define SVGA_3D_CMD_RESERVED4 1141 +#define SVGA_3D_CMD_RESERVED5 1142 #define SVGA_3D_CMD_MAX 1142 #define SVGA_3D_CMD_FUTURE_MAX 3000 @@ -1973,8 +1983,7 @@ struct { uint32 sizeInBytes; uint32 validSizeInBytes; SVGAMobFormat ptDepth; -} -__attribute__((__packed__)) +} __packed SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */ typedef @@ -1984,15 +1993,13 @@ struct { uint32 sizeInBytes; uint32 validSizeInBytes; SVGAMobFormat ptDepth; -} -__attribute__((__packed__)) +} __packed 
SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */ typedef struct { SVGAOTableType type; -} -__attribute__((__packed__)) +} __packed SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */ /* @@ -2005,8 +2012,7 @@ struct SVGA3dCmdDefineGBMob { SVGAMobFormat ptDepth; PPN base; uint32 sizeInBytes; -} -__attribute__((__packed__)) +} __packed SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */ @@ -2017,8 +2023,7 @@ SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */ typedef struct SVGA3dCmdDestroyGBMob { SVGAMobId mobid; -} -__attribute__((__packed__)) +} __packed SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */ /* @@ -2031,8 +2036,7 @@ struct SVGA3dCmdRedefineGBMob { SVGAMobFormat ptDepth; PPN base; uint32 sizeInBytes; -} -__attribute__((__packed__)) +} __packed SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */ /* @@ -2045,8 +2049,7 @@ struct SVGA3dCmdDefineGBMob64 { SVGAMobFormat ptDepth; PPN64 base; uint32 sizeInBytes; -} -__attribute__((__packed__)) +} __packed SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */ /* @@ -2059,8 +2062,7 @@ struct SVGA3dCmdRedefineGBMob64 { SVGAMobFormat ptDepth; PPN64 base; uint32 sizeInBytes; -} -__attribute__((__packed__)) +} __packed SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */ /* @@ -2070,8 +2072,7 @@ SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */ typedef struct SVGA3dCmdUpdateGBMobMapping { SVGAMobId mobid; -} -__attribute__((__packed__)) +} __packed SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */ /* @@ -2087,7 +2088,8 @@ struct SVGA3dCmdDefineGBSurface { uint32 multisampleCount; SVGA3dTextureFilter autogenFilter; SVGA3dSize size; -} SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */ +} __packed +SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */ /* * Destroy a guest-backed surface. @@ -2096,7 +2098,8 @@ struct SVGA3dCmdDefineGBSurface { typedef struct SVGA3dCmdDestroyGBSurface { uint32 sid; -} SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */ +} __packed +SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */ /* * Bind a guest-backed surface to an object. @@ -2106,7 +2109,8 @@ typedef struct SVGA3dCmdBindGBSurface { uint32 sid; SVGAMobId mobid; -} SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */ +} __packed +SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */ /* * Conditionally bind a mob to a guest backed surface if testMobid @@ -2123,7 +2127,7 @@ struct{ SVGAMobId testMobid; SVGAMobId mobid; uint32 flags; -} +} __packed SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */ /* @@ -2135,7 +2139,8 @@ typedef struct SVGA3dCmdUpdateGBImage { SVGA3dSurfaceImageId image; SVGA3dBox box; -} SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */ +} __packed +SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */ /* * Update an entire guest-backed surface. @@ -2145,7 +2150,8 @@ struct SVGA3dCmdUpdateGBImage { typedef struct SVGA3dCmdUpdateGBSurface { uint32 sid; -} SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */ +} __packed +SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */ /* * Readback an image in a guest-backed surface. @@ -2155,7 +2161,8 @@ struct SVGA3dCmdUpdateGBSurface { typedef struct SVGA3dCmdReadbackGBImage { SVGA3dSurfaceImageId image; -} SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/ +} __packed +SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/ /* * Readback an entire guest-backed surface. 
@@ -2165,7 +2172,8 @@ struct SVGA3dCmdReadbackGBImage { typedef struct SVGA3dCmdReadbackGBSurface { uint32 sid; -} SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */ +} __packed +SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */ /* * Readback a sub rect of an image in a guest-backed surface. After @@ -2179,7 +2187,7 @@ struct SVGA3dCmdReadbackGBImagePartial { SVGA3dSurfaceImageId image; SVGA3dBox box; uint32 invertBox; -} +} __packed SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */ /* @@ -2190,7 +2198,8 @@ SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */ typedef struct SVGA3dCmdInvalidateGBImage { SVGA3dSurfaceImageId image; -} SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */ +} __packed +SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */ /* * Invalidate an entire guest-backed surface. @@ -2200,7 +2209,8 @@ struct SVGA3dCmdInvalidateGBImage { typedef struct SVGA3dCmdInvalidateGBSurface { uint32 sid; -} SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */ +} __packed +SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */ /* * Invalidate a sub rect of an image in a guest-backed surface. After @@ -2214,7 +2224,7 @@ struct SVGA3dCmdInvalidateGBImagePartial { SVGA3dSurfaceImageId image; SVGA3dBox box; uint32 invertBox; -} +} __packed SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */ /* @@ -2224,7 +2234,8 @@ SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */ typedef struct SVGA3dCmdDefineGBContext { uint32 cid; -} SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */ +} __packed +SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */ /* * Destroy a guest-backed context. @@ -2233,7 +2244,8 @@ struct SVGA3dCmdDefineGBContext { typedef struct SVGA3dCmdDestroyGBContext { uint32 cid; -} SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */ +} __packed +SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */ /* * Bind a guest-backed context. @@ -2252,7 +2264,8 @@ struct SVGA3dCmdBindGBContext { uint32 cid; SVGAMobId mobid; uint32 validContents; -} SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */ +} __packed +SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */ /* * Readback a guest-backed context. @@ -2262,7 +2275,8 @@ struct SVGA3dCmdBindGBContext { typedef struct SVGA3dCmdReadbackGBContext { uint32 cid; -} SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */ +} __packed +SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */ /* * Invalidate a guest-backed context. @@ -2270,7 +2284,8 @@ struct SVGA3dCmdReadbackGBContext { typedef struct SVGA3dCmdInvalidateGBContext { uint32 cid; -} SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */ +} __packed +SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */ /* * Define a guest-backed shader. @@ -2281,7 +2296,8 @@ struct SVGA3dCmdDefineGBShader { uint32 shid; SVGA3dShaderType type; uint32 sizeInBytes; -} SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */ +} __packed +SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */ /* * Bind a guest-backed shader. @@ -2291,7 +2307,8 @@ typedef struct SVGA3dCmdBindGBShader { uint32 shid; SVGAMobId mobid; uint32 offsetInBytes; -} SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */ +} __packed +SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */ /* * Destroy a guest-backed shader. 
@@ -2299,7 +2316,8 @@ typedef struct SVGA3dCmdBindGBShader { typedef struct SVGA3dCmdDestroyGBShader { uint32 shid; -} SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */ +} __packed +SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */ typedef struct { @@ -2314,14 +2332,16 @@ struct { * Note that FLOAT and INT constants are 4-dwords in length, while * BOOL constants are 1-dword in length. */ -} SVGA3dCmdSetGBShaderConstInline; +} __packed +SVGA3dCmdSetGBShaderConstInline; /* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */ typedef struct { uint32 cid; SVGA3dQueryType type; -} SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */ +} __packed +SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */ typedef struct { @@ -2329,7 +2349,8 @@ struct { SVGA3dQueryType type; SVGAMobId mobid; uint32 offset; -} SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */ +} __packed +SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */ /* @@ -2346,21 +2367,22 @@ struct { SVGA3dQueryType type; SVGAMobId mobid; uint32 offset; -} SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */ +} __packed +SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */ typedef struct { SVGAMobId mobid; uint32 fbOffset; uint32 initalized; -} +} __packed SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */ typedef struct { SVGAMobId mobid; uint32 gartOffset; -} +} __packed SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */ @@ -2368,7 +2390,7 @@ typedef struct { uint32 gartOffset; uint32 numPages; -} +} __packed SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */ @@ -2385,27 +2407,27 @@ struct { int32 xRoot; int32 yRoot; uint32 flags; -} +} __packed SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */ typedef struct { uint32 stid; -} +} __packed SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */ typedef struct { uint32 stid; SVGA3dSurfaceImageId image; -} +} __packed SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */ typedef struct { uint32 stid; SVGA3dBox box; -} +} __packed SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */ /* diff --git a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h index 8369c3ba10fe..ef3385096145 100644 --- a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h +++ b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h @@ -38,8 +38,11 @@ #define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y)) #define max_t(type, x, y) ((x) > (y) ? (x) : (y)) +#define min_t(type, x, y) ((x) < (y) ? (x) : (y)) #define surf_size_struct SVGA3dSize #define u32 uint32 +#define u64 uint64_t +#define U32_MAX ((u32)~0U) #endif /* __KERNEL__ */ @@ -704,8 +707,8 @@ static const struct svga3d_surface_desc svga3d_surface_descs[] = { static inline u32 clamped_umul32(u32 a, u32 b) { - uint64_t tmp = (uint64_t) a*b; - return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp; + u64 tmp = (u64) a*b; + return (tmp > (u64) U32_MAX) ? 
U32_MAX : tmp; } static inline const struct svga3d_surface_desc * @@ -834,7 +837,7 @@ svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format, bool cubemap) { const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format); - u32 total_size = 0; + u64 total_size = 0; u32 mip; for (mip = 0; mip < num_mip_levels; mip++) { @@ -847,7 +850,7 @@ svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format, if (cubemap) total_size *= SVGA3D_MAX_SURFACE_FACES; - return total_size; + return (u32) min_t(u64, total_size, (u64) U32_MAX); } diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h index 71defa4d2d75..11323dd5196f 100644 --- a/drivers/gpu/drm/vmwgfx/svga_reg.h +++ b/drivers/gpu/drm/vmwgfx/svga_reg.h @@ -169,10 +169,17 @@ enum { SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */ SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */ SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */ + SVGA_REG_COMMAND_LOW = 48, /* Lower 32 bits and submits commands */ + SVGA_REG_COMMAND_HIGH = 49, /* Upper 32 bits of command buffer PA */ SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */ SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */ SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */ - SVGA_REG_TOP = 53, /* Must be 1 more than the last register */ + SVGA_REG_CMD_PREPEND_LOW = 53, + SVGA_REG_CMD_PREPEND_HIGH = 54, + SVGA_REG_SCREENTARGET_MAX_WIDTH = 55, + SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56, + SVGA_REG_MOB_MAX_SIZE = 57, + SVGA_REG_TOP = 58, /* Must be 1 more than the last register */ SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */ /* Next 768 (== 256*3) registers exist for colormap */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c index 9426c53fb483..1e80152674b5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c @@ -551,8 +551,7 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind) cmd->header.size = sizeof(cmd->body); cmd->body.cid = bi->ctx->id; cmd->body.type = bi->i1.shader_type; - cmd->body.shid = - cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID); + cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); vmw_fifo_commit(dev_priv, sizeof(*cmd)); return 0; @@ -585,8 +584,7 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi, cmd->header.size = sizeof(cmd->body); cmd->body.cid = bi->ctx->id; cmd->body.type = bi->i1.rt_type; - cmd->body.target.sid = - cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID); + cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); cmd->body.target.face = 0; cmd->body.target.mipmap = 0; vmw_fifo_commit(dev_priv, sizeof(*cmd)); @@ -628,8 +626,7 @@ static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, cmd->body.c.cid = bi->ctx->id; cmd->body.s1.stage = bi->i1.texture_stage; cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE; - cmd->body.s1.value = - cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID); + cmd->body.s1.value = ((rebind) ? 
bi->res->id : SVGA3D_INVALID_ID); vmw_fifo_commit(dev_priv, sizeof(*cmd)); return 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 3bdc0adc656d..0083cbf99edf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -667,6 +667,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv->memory_size = 512*1024*1024; } dev_priv->max_mob_pages = 0; + dev_priv->max_mob_size = 0; if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { uint64_t mem_size = vmw_read(dev_priv, @@ -676,6 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv->prim_bb_mem = vmw_read(dev_priv, SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM); + dev_priv->max_mob_size = + vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE); } else dev_priv->prim_bb_mem = dev_priv->vram_size; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index ecaa302a6154..9e4be1725985 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -386,6 +386,7 @@ struct vmw_private { uint32_t max_gmr_ids; uint32_t max_gmr_pages; uint32_t max_mob_pages; + uint32_t max_mob_size; uint32_t memory_size; bool has_gmr; bool has_mob; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 269b85cc875a..efb575a7996c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -602,7 +602,7 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv, { struct vmw_cid_cmd { SVGA3dCmdHeader header; - __le32 cid; + uint32_t cid; } *cmd; cmd = container_of(header, struct vmw_cid_cmd, header); @@ -1835,7 +1835,7 @@ static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, return 0; } -static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = { +static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid, false, false, false), VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid, @@ -2032,6 +2032,9 @@ static int vmw_cmd_check(struct vmw_private *dev_priv, goto out_invalid; entry = &vmw_cmd_entries[cmd_id]; + if (unlikely(!entry->func)) + goto out_invalid; + if (unlikely(!entry->user_allow && !sw_context->kernel)) goto out_privileged; @@ -2469,7 +2472,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, if (dev_priv->has_mob) { ret = vmw_rebind_contexts(sw_context); if (unlikely(ret != 0)) - goto out_err; + goto out_unlock_binding; } cmd = vmw_fifo_reserve(dev_priv, command_size); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index f9881f9e62bd..47b70949bf3a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -102,6 +102,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, vmw_fp->gb_aware = true; param->value = dev_priv->max_mob_pages * PAGE_SIZE; break; + case DRM_VMW_PARAM_MAX_MOB_SIZE: + param->value = dev_priv->max_mob_size; + break; default: DRM_ERROR("Illegal vmwgfx get param request: %d\n", param->param); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index 217d941b8176..ee3856578a12 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c @@ -371,13 +371,13 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, TTM_REF_USAGE); } -int vmw_shader_alloc(struct vmw_private *dev_priv, - 
struct vmw_dma_buffer *buffer, - size_t shader_size, - size_t offset, - SVGA3dShaderType shader_type, - struct ttm_object_file *tfile, - u32 *handle) +static int vmw_shader_alloc(struct vmw_private *dev_priv, + struct vmw_dma_buffer *buffer, + size_t shader_size, + size_t offset, + SVGA3dShaderType shader_type, + struct ttm_object_file *tfile, + u32 *handle) { struct vmw_user_shader *ushader; struct vmw_resource *res, *tmp; @@ -779,6 +779,8 @@ vmw_compat_shader_man_create(struct vmw_private *dev_priv) int ret; man = kzalloc(sizeof(*man), GFP_KERNEL); + if (man == NULL) + return ERR_PTR(-ENOMEM); man->dev_priv = dev_priv; INIT_LIST_HEAD(&man->list); diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index 497558127bb3..f822fd2a1ada 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -469,6 +469,9 @@ static const struct hid_device_id apple_devices[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI), .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, + USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS), + .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS), .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI), diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 3bfac3accd22..cc32a6f96c64 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1679,6 +1679,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) }, @@ -1779,6 +1780,8 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2) }, { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) }, { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) }, diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c index 8fae6d1414cc..c24908f14934 100644 --- a/drivers/hid/hid-hyperv.c +++ b/drivers/hid/hid-hyperv.c @@ -157,6 +157,7 @@ struct mousevsc_dev { u32 report_desc_size; struct hv_input_dev_info hid_dev_info; struct hid_device *hid_device; + u8 input_buf[HID_MAX_BUFFER_SIZE]; }; @@ -256,6 +257,7 @@ static void mousevsc_on_receive(struct hv_device *device, struct synthhid_msg *hid_msg; struct mousevsc_dev *input_dev = hv_get_drvdata(device); struct synthhid_input_report *input_report; + size_t len; pipe_msg = (struct pipe_prt_msg 
*)((unsigned long)packet + (packet->offset8 << 3)); @@ -300,9 +302,12 @@ static void mousevsc_on_receive(struct hv_device *device, (struct synthhid_input_report *)pipe_msg->data; if (!input_dev->init_complete) break; - hid_input_report(input_dev->hid_device, - HID_INPUT_REPORT, input_report->buffer, - input_report->header.size, 1); + + len = min(input_report->header.size, + (u32)sizeof(input_dev->input_buf)); + memcpy(input_dev->input_buf, input_report->buffer, len); + hid_input_report(input_dev->hid_device, HID_INPUT_REPORT, + input_dev->input_buf, len, 1); break; default: pr_err("unsupported hid msg type - type %d len %d", diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 5a5248f2cc07..22f28d6b33a8 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -135,6 +135,7 @@ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256 +#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS 0x0257 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292 @@ -240,6 +241,7 @@ #define USB_VENDOR_ID_CYGNAL 0x10c4 #define USB_DEVICE_ID_CYGNAL_RADIO_SI470X 0x818a +#define USB_DEVICE_ID_FOCALTECH_FTXXXX_MULTITOUCH 0x81b9 #define USB_DEVICE_ID_CYGNAL_RADIO_SI4713 0x8244 @@ -451,6 +453,9 @@ #define USB_VENDOR_ID_INTEL_1 0x8087 #define USB_DEVICE_ID_INTEL_HID_SENSOR 0x09fa +#define USB_VENDOR_ID_STM_0 0x0483 +#define USB_DEVICE_ID_STM_HID_SENSOR 0x91d1 + #define USB_VENDOR_ID_ION 0x15e4 #define USB_DEVICE_ID_ICADE 0x0132 @@ -619,6 +624,8 @@ #define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713 #define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730 #define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c +#define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7 +#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9 #define USB_VENDOR_ID_MOJO 0x8282 #define USB_DEVICE_ID_RETRO_ADAPTER 0x3201 @@ -644,6 +651,7 @@ #define USB_VENDOR_ID_NEXIO 0x1870 #define USB_DEVICE_ID_NEXIO_MULTITOUCH_420 0x010d +#define USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750 0x0110 #define USB_VENDOR_ID_NEXTWINDOW 0x1926 #define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index d50e7313b171..a713e6211419 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -1178,7 +1178,7 @@ static void hidinput_led_worker(struct work_struct *work) /* fall back to generic raw-output-report */ len = ((report->size - 1) >> 3) + 1 + (report->id > 0); - buf = kmalloc(len, GFP_KERNEL); + buf = hid_alloc_report_buf(report, GFP_KERNEL); if (!buf) return; diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c index c6ef6eed3091..404a3a8a82f1 100644 --- a/drivers/hid/hid-microsoft.c +++ b/drivers/hid/hid-microsoft.c @@ -208,6 +208,10 @@ static const struct hid_device_id ms_devices[] = { .driver_data = MS_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500), .driver_data = MS_DUPLICATE_USAGES }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2), + .driver_data = 0 }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), + .driver_data = 0 }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT), .driver_data = MS_PRESENTER }, diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index f134d73beca1..221d503f1c24 100644 --- a/drivers/hid/hid-multitouch.c +++ 
b/drivers/hid/hid-multitouch.c @@ -1166,6 +1166,11 @@ static const struct hid_device_id mt_devices[] = { MT_USB_DEVICE(USB_VENDOR_ID_FLATFROG, USB_DEVICE_ID_MULTITOUCH_3200) }, + /* FocalTech Panels */ + { .driver_data = MT_CLS_SERIAL, + MT_USB_DEVICE(USB_VENDOR_ID_CYGNAL, + USB_DEVICE_ID_FOCALTECH_FTXXXX_MULTITOUCH) }, + /* GeneralTouch panel */ { .driver_data = MT_CLS_GENERALTOUCH_TWOFINGERS, MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c index 46f4480035bc..9c22e14c57f0 100644 --- a/drivers/hid/hid-sensor-hub.c +++ b/drivers/hid/hid-sensor-hub.c @@ -665,6 +665,9 @@ static const struct hid_device_id sensor_hub_devices[] = { { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_INTEL_1, USB_DEVICE_ID_INTEL_HID_SENSOR), .driver_data = HID_SENSOR_HUB_ENUM_QUIRK}, + { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0, + USB_DEVICE_ID_STM_HID_SENSOR), + .driver_data = HID_SENSOR_HUB_ENUM_QUIRK}, { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID, HID_ANY_ID) }, { } diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index d1f81f52481a..42eebd14de1f 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -582,7 +582,7 @@ static void i2c_hid_request(struct hid_device *hid, struct hid_report *rep, int ret; int len = i2c_hid_get_report_length(rep) - 2; - buf = kzalloc(len, GFP_KERNEL); + buf = hid_alloc_report_buf(rep, GFP_KERNEL); if (!buf) return; diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 175ec0afb70c..dbd83878ff99 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -74,6 +74,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS }, diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index af6edf9b1936..f2d7bf90c9fe 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c @@ -67,7 +67,6 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, int ret = 0; struct vmbus_channel_initiate_contact *msg; unsigned long flags; - int t; init_completion(&msginfo->waitevent); @@ -78,6 +77,8 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, msg->interrupt_page = virt_to_phys(vmbus_connection.int_page); msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]); msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]); + if (version == VERSION_WIN8) + msg->target_vcpu = hv_context.vp_index[smp_processor_id()]; /* * Add to list before we send the request since we may @@ -100,15 +101,7 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, } /* Wait for the connection response */ - t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ); - if (t == 0) { - spin_lock_irqsave(&vmbus_connection.channelmsg_lock, - flags); - list_del(&msginfo->msglistentry); - 
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, - flags); - return -ETIMEDOUT; - } + wait_for_completion(&msginfo->waitevent); spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); list_del(&msginfo->msglistentry); diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index 8c23203915af..8a17f01e8672 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c @@ -145,7 +145,7 @@ struct ntc_data { static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata) { struct iio_channel *channel = pdata->chan; - unsigned int result; + s64 result; int val, ret; ret = iio_read_channel_raw(channel, &val); @@ -155,10 +155,10 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata) } /* unit: mV */ - result = pdata->pullup_uv * val; + result = pdata->pullup_uv * (s64) val; result >>= 12; - return result; + return (int)result; } static const struct of_device_id ntc_match[] = { diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index b8c5187b9ee0..d52d84937ad3 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c @@ -97,7 +97,6 @@ enum { enum { MV64XXX_I2C_ACTION_INVALID, MV64XXX_I2C_ACTION_CONTINUE, - MV64XXX_I2C_ACTION_OFFLOAD_SEND_START, MV64XXX_I2C_ACTION_SEND_START, MV64XXX_I2C_ACTION_SEND_RESTART, MV64XXX_I2C_ACTION_OFFLOAD_RESTART, @@ -204,6 +203,9 @@ static int mv64xxx_i2c_offload_msg(struct mv64xxx_i2c_data *drv_data) unsigned long ctrl_reg; struct i2c_msg *msg = drv_data->msgs; + if (!drv_data->offload_enabled) + return -EOPNOTSUPP; + drv_data->msg = msg; drv_data->byte_posn = 0; drv_data->bytes_left = msg->len; @@ -433,8 +435,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data) drv_data->msgs++; drv_data->num_msgs--; - if (!(drv_data->offload_enabled && - mv64xxx_i2c_offload_msg(drv_data))) { + if (mv64xxx_i2c_offload_msg(drv_data) < 0) { drv_data->cntl_bits |= MV64XXX_I2C_REG_CONTROL_START; writel(drv_data->cntl_bits, drv_data->reg_base + drv_data->reg_offsets.control); @@ -458,15 +459,14 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data) drv_data->reg_base + drv_data->reg_offsets.control); break; - case MV64XXX_I2C_ACTION_OFFLOAD_SEND_START: - if (!mv64xxx_i2c_offload_msg(drv_data)) - break; - else - drv_data->action = MV64XXX_I2C_ACTION_SEND_START; - /* FALLTHRU */ case MV64XXX_I2C_ACTION_SEND_START: - writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START, - drv_data->reg_base + drv_data->reg_offsets.control); + /* Can we offload this msg ? 
*/ + if (mv64xxx_i2c_offload_msg(drv_data) < 0) { + /* No, switch to standard path */ + mv64xxx_i2c_prepare_for_io(drv_data, drv_data->msgs); + writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START, + drv_data->reg_base + drv_data->reg_offsets.control); + } break; case MV64XXX_I2C_ACTION_SEND_ADDR_1: @@ -625,15 +625,10 @@ mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg, unsigned long flags; spin_lock_irqsave(&drv_data->lock, flags); - if (drv_data->offload_enabled) { - drv_data->action = MV64XXX_I2C_ACTION_OFFLOAD_SEND_START; - drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND; - } else { - mv64xxx_i2c_prepare_for_io(drv_data, msg); - drv_data->action = MV64XXX_I2C_ACTION_SEND_START; - drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND; - } + drv_data->action = MV64XXX_I2C_ACTION_SEND_START; + drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND; + drv_data->send_stop = is_last; drv_data->block = 1; mv64xxx_i2c_do_action(drv_data); diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c index 3bec9220df04..bfec313492b3 100644 --- a/drivers/iio/accel/bma180.c +++ b/drivers/iio/accel/bma180.c @@ -447,14 +447,14 @@ static const struct iio_chan_spec_ext_info bma180_ext_info[] = { { }, }; -#define BMA180_CHANNEL(_index) { \ +#define BMA180_CHANNEL(_axis) { \ .type = IIO_ACCEL, \ - .indexed = 1, \ - .channel = (_index), \ + .modified = 1, \ + .channel2 = IIO_MOD_##_axis, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ - .scan_index = (_index), \ + .scan_index = AXIS_##_axis, \ .scan_type = { \ .sign = 's', \ .realbits = 14, \ @@ -465,10 +465,10 @@ static const struct iio_chan_spec_ext_info bma180_ext_info[] = { } static const struct iio_chan_spec bma180_channels[] = { - BMA180_CHANNEL(AXIS_X), - BMA180_CHANNEL(AXIS_Y), - BMA180_CHANNEL(AXIS_Z), - IIO_CHAN_SOFT_TIMESTAMP(4), + BMA180_CHANNEL(X), + BMA180_CHANNEL(Y), + BMA180_CHANNEL(Z), + IIO_CHAN_SOFT_TIMESTAMP(3), }; static irqreturn_t bma180_trigger_handler(int irq, void *p) diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c index e283f2f2ee2f..360259266d4f 100644 --- a/drivers/iio/adc/max1363.c +++ b/drivers/iio/adc/max1363.c @@ -1560,7 +1560,7 @@ static int max1363_probe(struct i2c_client *client, st->client = client; st->vref_uv = st->chip_info->int_vref_mv * 1000; - vref = devm_regulator_get(&client->dev, "vref"); + vref = devm_regulator_get_optional(&client->dev, "vref"); if (!IS_ERR(vref)) { int vref_uv; diff --git a/drivers/iio/imu/adis16400.h b/drivers/iio/imu/adis16400.h index 2f8f9d632386..0916bf6b6c31 100644 --- a/drivers/iio/imu/adis16400.h +++ b/drivers/iio/imu/adis16400.h @@ -189,6 +189,7 @@ enum { ADIS16300_SCAN_INCLI_X, ADIS16300_SCAN_INCLI_Y, ADIS16400_SCAN_ADC, + ADIS16400_SCAN_TIMESTAMP, }; #ifdef CONFIG_IIO_BUFFER diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c index 368660dfe135..7c582f7ae34e 100644 --- a/drivers/iio/imu/adis16400_core.c +++ b/drivers/iio/imu/adis16400_core.c @@ -632,7 +632,7 @@ static const struct iio_chan_spec adis16400_channels[] = { ADIS16400_MAGN_CHAN(Z, ADIS16400_ZMAGN_OUT, 14), ADIS16400_TEMP_CHAN(ADIS16400_TEMP_OUT, 12), ADIS16400_AUX_ADC_CHAN(ADIS16400_AUX_ADC, 12), - IIO_CHAN_SOFT_TIMESTAMP(12) + IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP), }; static const struct iio_chan_spec adis16448_channels[] = { @@ -659,7 +659,7 @@ static const struct iio_chan_spec 
adis16448_channels[] = { }, }, ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12), - IIO_CHAN_SOFT_TIMESTAMP(11) + IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP), }; static const struct iio_chan_spec adis16350_channels[] = { @@ -677,7 +677,7 @@ static const struct iio_chan_spec adis16350_channels[] = { ADIS16400_MOD_TEMP_CHAN(X, ADIS16350_XTEMP_OUT, 12), ADIS16400_MOD_TEMP_CHAN(Y, ADIS16350_YTEMP_OUT, 12), ADIS16400_MOD_TEMP_CHAN(Z, ADIS16350_ZTEMP_OUT, 12), - IIO_CHAN_SOFT_TIMESTAMP(11) + IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP), }; static const struct iio_chan_spec adis16300_channels[] = { @@ -690,7 +690,7 @@ static const struct iio_chan_spec adis16300_channels[] = { ADIS16400_AUX_ADC_CHAN(ADIS16300_AUX_ADC, 12), ADIS16400_INCLI_CHAN(X, ADIS16300_PITCH_OUT, 13), ADIS16400_INCLI_CHAN(Y, ADIS16300_ROLL_OUT, 13), - IIO_CHAN_SOFT_TIMESTAMP(14) + IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP), }; static const struct iio_chan_spec adis16334_channels[] = { @@ -701,7 +701,7 @@ static const struct iio_chan_spec adis16334_channels[] = { ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 14), ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 14), ADIS16400_TEMP_CHAN(ADIS16350_XTEMP_OUT, 12), - IIO_CHAN_SOFT_TIMESTAMP(8) + IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP), }; static struct attribute *adis16400_attributes[] = { diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c index 3d8110157f2d..94daa9fc1247 100644 --- a/drivers/iio/light/tsl2563.c +++ b/drivers/iio/light/tsl2563.c @@ -460,10 +460,14 @@ static int tsl2563_write_raw(struct iio_dev *indio_dev, { struct tsl2563_chip *chip = iio_priv(indio_dev); - if (chan->channel == IIO_MOD_LIGHT_BOTH) + if (mask != IIO_CHAN_INFO_CALIBSCALE) + return -EINVAL; + if (chan->channel2 == IIO_MOD_LIGHT_BOTH) chip->calib0 = calib_from_sysfs(val); - else + else if (chan->channel2 == IIO_MOD_LIGHT_IR) chip->calib1 = calib_from_sysfs(val); + else + return -EINVAL; return 0; } @@ -472,14 +476,14 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, - long m) + long mask) { int ret = -EINVAL; u32 calib0, calib1; struct tsl2563_chip *chip = iio_priv(indio_dev); mutex_lock(&chip->lock); - switch (m) { + switch (mask) { case IIO_CHAN_INFO_RAW: case IIO_CHAN_INFO_PROCESSED: switch (chan->type) { @@ -498,7 +502,7 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev, ret = tsl2563_get_adc(chip); if (ret) goto error_ret; - if (chan->channel == 0) + if (chan->channel2 == IIO_MOD_LIGHT_BOTH) *val = chip->data0; else *val = chip->data1; @@ -510,7 +514,7 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev, break; case IIO_CHAN_INFO_CALIBSCALE: - if (chan->channel == 0) + if (chan->channel2 == IIO_MOD_LIGHT_BOTH) *val = calib_to_sysfs(chip->calib0); else *val = calib_to_sysfs(chip->calib1); diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c index ff284e5afd95..05423543f89d 100644 --- a/drivers/iio/magnetometer/ak8975.c +++ b/drivers/iio/magnetometer/ak8975.c @@ -85,6 +85,7 @@ #define AK8975_MAX_CONVERSION_TIMEOUT 500 #define AK8975_CONVERSION_DONE_POLL_TIME 10 #define AK8975_DATA_READY_TIMEOUT ((100*HZ)/1000) +#define RAW_TO_GAUSS(asa) ((((asa) + 128) * 3000) / 256) /* * Per-instance context data for the device. 
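In the adis16400 hunks above, the per-chip channel tables stop hard-coding the soft-timestamp scan index (8, 11, 12 or 14) and instead use a new ADIS16400_SCAN_TIMESTAMP value appended to the driver's scan-index enum, so the timestamp cannot collide with a data channel's scan_index no matter which channels a given chip variant exposes. A rough sketch of the pattern, with names simplified from the driver:

/* Sketch only - simplified from adis16400.h / adis16400_core.c. */
enum {
	SCAN_SUPPLY,
	SCAN_GYRO_X,
	/* ... one entry per possible data channel ... */
	SCAN_ADC,
	SCAN_TIMESTAMP,		/* always one past the last data channel */
};

static const struct iio_chan_spec example_channels[] = {
	/* real data channels use their enum value as .scan_index */
	IIO_CHAN_SOFT_TIMESTAMP(SCAN_TIMESTAMP), /* was IIO_CHAN_SOFT_TIMESTAMP(12) */
};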
@@ -265,15 +266,15 @@ static int ak8975_setup(struct i2c_client *client) * * Since 1uT = 0.01 gauss, our final scale factor becomes: * - * Hadj = H * ((ASA + 128) / 256) * 3/10 * 100 - * Hadj = H * ((ASA + 128) * 30 / 256 + * Hadj = H * ((ASA + 128) / 256) * 3/10 * 1/100 + * Hadj = H * ((ASA + 128) * 0.003) / 256 * * Since ASA doesn't change, we cache the resultant scale factor into the * device context in ak8975_setup(). */ - data->raw_to_gauss[0] = ((data->asa[0] + 128) * 30) >> 8; - data->raw_to_gauss[1] = ((data->asa[1] + 128) * 30) >> 8; - data->raw_to_gauss[2] = ((data->asa[2] + 128) * 30) >> 8; + data->raw_to_gauss[0] = RAW_TO_GAUSS(data->asa[0]); + data->raw_to_gauss[1] = RAW_TO_GAUSS(data->asa[1]); + data->raw_to_gauss[2] = RAW_TO_GAUSS(data->asa[2]); return 0; } @@ -428,8 +429,9 @@ static int ak8975_read_raw(struct iio_dev *indio_dev, case IIO_CHAN_INFO_RAW: return ak8975_read_axis(indio_dev, chan->address, val); case IIO_CHAN_INFO_SCALE: - *val = data->raw_to_gauss[chan->address]; - return IIO_VAL_INT; + *val = 0; + *val2 = data->raw_to_gauss[chan->address]; + return IIO_VAL_INT_PLUS_MICRO; } return -EINVAL; } diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c index 4b65b6d3bdb1..f66955fb3509 100644 --- a/drivers/iio/magnetometer/mag3110.c +++ b/drivers/iio/magnetometer/mag3110.c @@ -106,7 +106,7 @@ static ssize_t mag3110_show_int_plus_micros(char *buf, while (n-- > 0) len += scnprintf(buf + len, PAGE_SIZE - len, - "%d.%d ", vals[n][0], vals[n][1]); + "%d.%06d ", vals[n][0], vals[n][1]); /* replace trailing space by newline */ buf[len - 1] = '\n'; @@ -154,6 +154,9 @@ static int mag3110_read_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_RAW: + if (iio_buffer_enabled(indio_dev)) + return -EBUSY; + switch (chan->type) { case IIO_MAGN: /* in 0.1 uT / LSB */ ret = mag3110_read(data, buffer); @@ -199,6 +202,9 @@ static int mag3110_write_raw(struct iio_dev *indio_dev, struct mag3110_data *data = iio_priv(indio_dev); int rate; + if (iio_buffer_enabled(indio_dev)) + return -EBUSY; + switch (mask) { case IIO_CHAN_INFO_SAMP_FREQ: rate = mag3110_get_samp_freq_index(data, val, val2); diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c index d53cf519f42a..00400c352c1a 100644 --- a/drivers/infiniband/hw/amso1100/c2.c +++ b/drivers/infiniband/hw/amso1100/c2.c @@ -1082,6 +1082,7 @@ static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent) /* Initialize network device */ if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) { + ret = -ENOMEM; iounmap(mmio_regs); goto bail4; } @@ -1151,7 +1152,8 @@ static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent) goto bail10; } - if (c2_register_device(c2dev)) + ret = c2_register_device(c2dev); + if (ret) goto bail10; return 0; diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c index b7c986990053..d2a6d961344b 100644 --- a/drivers/infiniband/hw/amso1100/c2_rnic.c +++ b/drivers/infiniband/hw/amso1100/c2_rnic.c @@ -576,7 +576,8 @@ int c2_rnic_init(struct c2_dev *c2dev) goto bail4; /* Initialize cached the adapter limits */ - if (c2_rnic_query(c2dev, &c2dev->props)) + err = c2_rnic_query(c2dev, &c2dev->props); + if (err) goto bail5; /* Initialize the PD pool */ diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 45126879ad28..d286bdebe2ab 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -3352,6 +3352,7 @@ static int 
rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb) goto free_dst; } + neigh_release(neigh); step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step]; window = (__force u16) htons((__force u16)tcph->window); diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index c2702f549f10..e81c5547e647 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -347,7 +347,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ? IB_WIDTH_4X : IB_WIDTH_1X; props->active_speed = IB_SPEED_QDR; - props->port_cap_flags = IB_PORT_CM_SUP; + props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS; props->gid_tbl_len = mdev->dev->caps.gid_table_len[port]; props->max_msg_sz = mdev->dev->caps.max_msg_sz; props->pkey_tbl_len = 1; @@ -1357,6 +1357,21 @@ static struct device_attribute *mlx4_class_attributes[] = { &dev_attr_board_id }; +static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, + struct net_device *dev) +{ + memcpy(eui, dev->dev_addr, 3); + memcpy(eui + 5, dev->dev_addr + 3, 3); + if (vlan_id < 0x1000) { + eui[3] = vlan_id >> 8; + eui[4] = vlan_id & 0xff; + } else { + eui[3] = 0xff; + eui[4] = 0xfe; + } + eui[0] ^= 2; +} + static void update_gids_task(struct work_struct *work) { struct update_gid_work *gw = container_of(work, struct update_gid_work, work); @@ -1393,7 +1408,6 @@ static void reset_gids_task(struct work_struct *work) struct mlx4_cmd_mailbox *mailbox; union ib_gid *gids; int err; - int i; struct mlx4_dev *dev = gw->dev->dev; mailbox = mlx4_alloc_cmd_mailbox(dev); @@ -1405,18 +1419,16 @@ static void reset_gids_task(struct work_struct *work) gids = mailbox->buf; memcpy(gids, gw->gids, sizeof(gw->gids)); - for (i = 1; i < gw->dev->num_ports + 1; i++) { - if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, i) == - IB_LINK_LAYER_ETHERNET) { - err = mlx4_cmd(dev, mailbox->dma, - MLX4_SET_PORT_GID_TABLE << 8 | i, - 1, MLX4_CMD_SET_PORT, - MLX4_CMD_TIME_CLASS_B, - MLX4_CMD_WRAPPED); - if (err) - pr_warn(KERN_WARNING - "set port %d command failed\n", i); - } + if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, gw->port) == + IB_LINK_LAYER_ETHERNET) { + err = mlx4_cmd(dev, mailbox->dma, + MLX4_SET_PORT_GID_TABLE << 8 | gw->port, + 1, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); + if (err) + pr_warn(KERN_WARNING + "set port %d command failed\n", gw->port); } mlx4_free_cmd_mailbox(dev, mailbox); @@ -1425,7 +1437,8 @@ free: } static int update_gid_table(struct mlx4_ib_dev *dev, int port, - union ib_gid *gid, int clear) + union ib_gid *gid, int clear, + int default_gid) { struct update_gid_work *work; int i; @@ -1434,26 +1447,31 @@ static int update_gid_table(struct mlx4_ib_dev *dev, int port, int found = -1; int max_gids; - max_gids = dev->dev->caps.gid_table_len[port]; - for (i = 0; i < max_gids; ++i) { - if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid, - sizeof(*gid))) - found = i; - - if (clear) { - if (found >= 0) { - need_update = 1; - dev->iboe.gid_table[port - 1][found] = zgid; - break; - } - } else { - if (found >= 0) - break; - - if (free < 0 && - !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, + if (default_gid) { + free = 0; + } else { + max_gids = dev->dev->caps.gid_table_len[port]; + for (i = 1; i < max_gids; ++i) { + if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid, sizeof(*gid))) - free = i; + found = i; + + if (clear) { + if (found >= 0) { + need_update = 1; + dev->iboe.gid_table[port - 
1][found] = + zgid; + break; + } + } else { + if (found >= 0) + break; + + if (free < 0 && + !memcmp(&dev->iboe.gid_table[port - 1][i], + &zgid, sizeof(*gid))) + free = i; + } } } @@ -1478,18 +1496,26 @@ static int update_gid_table(struct mlx4_ib_dev *dev, int port, return 0; } -static int reset_gid_table(struct mlx4_ib_dev *dev) +static void mlx4_make_default_gid(struct net_device *dev, union ib_gid *gid) { - struct update_gid_work *work; + gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL); + mlx4_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev); +} + +static int reset_gid_table(struct mlx4_ib_dev *dev, u8 port) +{ + struct update_gid_work *work; work = kzalloc(sizeof(*work), GFP_ATOMIC); if (!work) return -ENOMEM; - memset(dev->iboe.gid_table, 0, sizeof(dev->iboe.gid_table)); + + memset(dev->iboe.gid_table[port - 1], 0, sizeof(work->gids)); memset(work->gids, 0, sizeof(work->gids)); INIT_WORK(&work->work, reset_gids_task); work->dev = dev; + work->port = port; queue_work(wq, &work->work); return 0; } @@ -1502,6 +1528,12 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev, struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ? rdma_vlan_dev_real_dev(event_netdev) : event_netdev; + union ib_gid default_gid; + + mlx4_make_default_gid(real_dev, &default_gid); + + if (!memcmp(gid, &default_gid, sizeof(*gid))) + return 0; if (event != NETDEV_DOWN && event != NETDEV_UP) return 0; @@ -1520,7 +1552,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev, (!netif_is_bond_master(real_dev) && (real_dev == iboe->netdevs[port - 1]))) update_gid_table(ibdev, port, gid, - event == NETDEV_DOWN); + event == NETDEV_DOWN, 0); spin_unlock(&iboe->lock); return 0; @@ -1536,7 +1568,6 @@ static u8 mlx4_ib_get_dev_port(struct net_device *dev, rdma_vlan_dev_real_dev(dev) : dev; iboe = &ibdev->iboe; - spin_lock(&iboe->lock); for (port = 1; port <= MLX4_MAX_PORTS; ++port) if ((netif_is_bond_master(real_dev) && @@ -1545,8 +1576,6 @@ static u8 mlx4_ib_get_dev_port(struct net_device *dev, (real_dev == iboe->netdevs[port - 1]))) break; - spin_unlock(&iboe->lock); - if ((port == 0) || (port > MLX4_MAX_PORTS)) return 0; else @@ -1607,7 +1636,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev, /*ifa->ifa_address;*/ ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid); - update_gid_table(ibdev, port, &gid, 0); + update_gid_table(ibdev, port, &gid, 0, 0); } endfor_ifa(in_dev); in_dev_put(in_dev); @@ -1619,7 +1648,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev, read_lock_bh(&in6_dev->lock); list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { pgid = (union ib_gid *)&ifp->addr; - update_gid_table(ibdev, port, pgid, 0); + update_gid_table(ibdev, port, pgid, 0, 0); } read_unlock_bh(&in6_dev->lock); in6_dev_put(in6_dev); @@ -1627,14 +1656,26 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev, #endif } +static void mlx4_ib_set_default_gid(struct mlx4_ib_dev *ibdev, + struct net_device *dev, u8 port) +{ + union ib_gid gid; + mlx4_make_default_gid(dev, &gid); + update_gid_table(ibdev, port, &gid, 0, 1); +} + static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev) { struct net_device *dev; + struct mlx4_ib_iboe *iboe = &ibdev->iboe; + int i; - if (reset_gid_table(ibdev)) - return -1; + for (i = 1; i <= ibdev->num_ports; ++i) + if (reset_gid_table(ibdev, i)) + return -1; read_lock(&dev_base_lock); + spin_lock(&iboe->lock); for_each_netdev(&init_net, dev) { u8 port = mlx4_ib_get_dev_port(dev, ibdev); @@ -1642,6 
+1683,7 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev) mlx4_ib_get_dev_addr(dev, ibdev, port); } + spin_unlock(&iboe->lock); read_unlock(&dev_base_lock); return 0; @@ -1656,25 +1698,57 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev) spin_lock(&iboe->lock); mlx4_foreach_ib_transport_port(port, ibdev->dev) { + enum ib_port_state port_state = IB_PORT_NOP; struct net_device *old_master = iboe->masters[port - 1]; + struct net_device *curr_netdev; struct net_device *curr_master; + iboe->netdevs[port - 1] = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port); + if (iboe->netdevs[port - 1]) + mlx4_ib_set_default_gid(ibdev, + iboe->netdevs[port - 1], port); + curr_netdev = iboe->netdevs[port - 1]; if (iboe->netdevs[port - 1] && netif_is_bond_slave(iboe->netdevs[port - 1])) { - rtnl_lock(); iboe->masters[port - 1] = netdev_master_upper_dev_get( iboe->netdevs[port - 1]); - rtnl_unlock(); + } else { + iboe->masters[port - 1] = NULL; } curr_master = iboe->masters[port - 1]; + if (curr_netdev) { + port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ? + IB_PORT_ACTIVE : IB_PORT_DOWN; + mlx4_ib_set_default_gid(ibdev, curr_netdev, port); + } else { + reset_gid_table(ibdev, port); + } + /* if using bonding/team and a slave port is down, we don't the bond IP + * based gids in the table since flows that select port by gid may get + * the down port. + */ + if (curr_master && (port_state == IB_PORT_DOWN)) { + reset_gid_table(ibdev, port); + mlx4_ib_set_default_gid(ibdev, curr_netdev, port); + } /* if bonding is used it is possible that we add it to masters - only after IP address is assigned to the net bonding - interface */ - if (curr_master && (old_master != curr_master)) + * only after IP address is assigned to the net bonding + * interface. 
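The new mlx4_addrconf_ifid_eui48()/mlx4_make_default_gid() helpers shown above derive a default RoCE GID straight from the netdev's MAC address: the IPv6 link-local prefix fe80::/64 plus a modified EUI-64 interface ID (0xff,0xfe inserted into the middle of the MAC when no VLAN is configured, the VLAN id otherwise, with the universal/local bit toggled). A worked example for MAC 02:00:c9:11:22:33 and no VLAN:

/* eui[0..2] = 02 00 c9           first three MAC bytes
 * eui[5..7] = 11 22 33           last three MAC bytes
 * eui[3..4] = ff fe              vlan_id >= 0x1000, i.e. no VLAN
 * eui[0]   ^= 2  ->  00          universal/local bit flipped
 *
 * default GID = fe80:0000:0000:0000:0000:c9ff:fe11:2233
 */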
+ */ + if (curr_master && (old_master != curr_master)) { + reset_gid_table(ibdev, port); + mlx4_ib_set_default_gid(ibdev, curr_netdev, port); mlx4_ib_get_dev_addr(curr_master, ibdev, port); + } + + if (!curr_master && (old_master != curr_master)) { + reset_gid_table(ibdev, port); + mlx4_ib_set_default_gid(ibdev, curr_netdev, port); + mlx4_ib_get_dev_addr(curr_netdev, ibdev, port); + } } spin_unlock(&iboe->lock); @@ -1810,6 +1884,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) int i, j; int err; struct mlx4_ib_iboe *iboe; + int ib_num_ports = 0; pr_info_once("%s", mlx4_ib_version); @@ -1985,10 +2060,14 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) ibdev->counters[i] = -1; } + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) + ib_num_ports++; + spin_lock_init(&ibdev->sm_lock); mutex_init(&ibdev->cap_mask_mutex); - if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) { + if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED && + ib_num_ports) { ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS; err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count, MLX4_IB_UC_STEER_QPN_ALIGN, @@ -2051,7 +2130,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) } } #endif + for (i = 1 ; i <= ibdev->num_ports ; ++i) + reset_gid_table(ibdev, i); + rtnl_lock(); mlx4_ib_scan_netdevs(ibdev); + rtnl_unlock(); mlx4_ib_init_gid_table(ibdev); } diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig index 8e6aebfaf8a4..10df386c6344 100644 --- a/drivers/infiniband/hw/mlx5/Kconfig +++ b/drivers/infiniband/hw/mlx5/Kconfig @@ -1,6 +1,6 @@ config MLX5_INFINIBAND tristate "Mellanox Connect-IB HCA support" - depends on NETDEVICES && ETHERNET && PCI && X86 + depends on NETDEVICES && ETHERNET && PCI select NET_VENDOR_MELLANOX select MLX5_CORE ---help--- diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 9660d093f8cf..aa03e732b6a8 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -261,8 +261,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SYS_IMAGE_GUID | - IB_DEVICE_RC_RNR_NAK_GEN | - IB_DEVICE_BLOCK_MULTICAST_LOOPBACK; + IB_DEVICE_RC_RNR_NAK_GEN; flags = dev->mdev.caps.flags; if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR) props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; @@ -536,24 +535,38 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata) { struct mlx5_ib_dev *dev = to_mdev(ibdev); - struct mlx5_ib_alloc_ucontext_req req; + struct mlx5_ib_alloc_ucontext_req_v2 req; struct mlx5_ib_alloc_ucontext_resp resp; struct mlx5_ib_ucontext *context; struct mlx5_uuar_info *uuari; struct mlx5_uar *uars; int gross_uuars; int num_uars; + int ver; int uuarn; int err; int i; + int reqlen; if (!dev->ib_active) return ERR_PTR(-EAGAIN); - err = ib_copy_from_udata(&req, udata, sizeof(req)); + memset(&req, 0, sizeof(req)); + reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr); + if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req)) + ver = 0; + else if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req_v2)) + ver = 2; + else + return ERR_PTR(-EINVAL); + + err = ib_copy_from_udata(&req, udata, reqlen); if (err) return ERR_PTR(err); + if (req.flags || req.reserved) + return ERR_PTR(-EINVAL); + if (req.total_num_uuars > MLX5_MAX_UUARS) return ERR_PTR(-ENOMEM); @@ -626,6 +639,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device 
*ibdev, if (err) goto out_uars; + uuari->ver = ver; uuari->num_low_latency_uuars = req.num_low_latency_uuars; uuari->uars = uars; uuari->num_uars = num_uars; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index ae37fb9bf262..7dfe8a1c84cf 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -216,7 +216,9 @@ static int sq_overhead(enum ib_qp_type qp_type) case IB_QPT_UC: size += sizeof(struct mlx5_wqe_ctrl_seg) + - sizeof(struct mlx5_wqe_raddr_seg); + sizeof(struct mlx5_wqe_raddr_seg) + + sizeof(struct mlx5_wqe_umr_ctrl_seg) + + sizeof(struct mlx5_mkey_seg); break; case IB_QPT_UD: @@ -428,11 +430,17 @@ static int alloc_uuar(struct mlx5_uuar_info *uuari, break; case MLX5_IB_LATENCY_CLASS_MEDIUM: - uuarn = alloc_med_class_uuar(uuari); + if (uuari->ver < 2) + uuarn = -ENOMEM; + else + uuarn = alloc_med_class_uuar(uuari); break; case MLX5_IB_LATENCY_CLASS_HIGH: - uuarn = alloc_high_class_uuar(uuari); + if (uuari->ver < 2) + uuarn = -ENOMEM; + else + uuarn = alloc_high_class_uuar(uuari); break; case MLX5_IB_LATENCY_CLASS_FAST_PATH: @@ -657,8 +665,8 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, int err; uuari = &dev->mdev.priv.uuari; - if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) - qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK; + if (init_attr->create_flags) + return -EINVAL; if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR) lc = MLX5_IB_LATENCY_CLASS_FAST_PATH; diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h index 32a2a5dfc523..0f4f8e42a17f 100644 --- a/drivers/infiniband/hw/mlx5/user.h +++ b/drivers/infiniband/hw/mlx5/user.h @@ -62,6 +62,13 @@ struct mlx5_ib_alloc_ucontext_req { __u32 num_low_latency_uuars; }; +struct mlx5_ib_alloc_ucontext_req_v2 { + __u32 total_num_uuars; + __u32 num_low_latency_uuars; + __u32 flags; + __u32 reserved; +}; + struct mlx5_ib_alloc_ucontext_resp { __u32 qp_tab_size; __u32 bf_reg_size; diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index 429141078eec..353c7b05a90a 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c @@ -675,8 +675,11 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent) INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status); /* Initialize network devices */ - if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL) + netdev = nes_netdev_init(nesdev, mmio_regs); + if (netdev == NULL) { + ret = -ENOMEM; goto bail7; + } /* Register network device */ ret = register_netdev(netdev); diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 2ca86ca818bd..1a8a945efa60 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -127,7 +127,7 @@ static int ocrdma_addr_event(unsigned long event, struct net_device *netdev, is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN; if (is_vlan) - netdev = vlan_dev_real_dev(netdev); + netdev = rdma_vlan_dev_real_dev(netdev); rcu_read_lock(); list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) { diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index aa92f40c9d50..e0cc201be41a 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -176,7 +176,7 @@ int ocrdma_query_port(struct ib_device *ibdev, props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP | - IB_PORT_DEVICE_MGMT_SUP | 
IB_PORT_VENDOR_CLASS_SUP; + IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_IP_BASED_GIDS; props->gid_tbl_len = OCRDMA_MAX_SGID; props->pkey_tbl_len = 1; props->bad_pkey_cntr = 0; @@ -1416,7 +1416,7 @@ int ocrdma_query_qp(struct ib_qp *ibqp, OCRDMA_QP_PARAMS_HOP_LMT_MASK) >> OCRDMA_QP_PARAMS_HOP_LMT_SHIFT; qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn & - OCRDMA_QP_PARAMS_SQ_PSN_MASK) >> + OCRDMA_QP_PARAMS_TCLASS_MASK) >> OCRDMA_QP_PARAMS_TCLASS_SHIFT; qp_attr->ah_attr.ah_flags = IB_AH_GRH; diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 5bfc02f450e6..d1bd21319d7d 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -2395,6 +2395,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd) qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a); qib_write_kreg(dd, kr_scratch, 0ULL); + /* ensure previous Tx parameters are not still forced */ + qib_write_kreg_port(ppd, krp_tx_deemph_override, + SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, + reset_tx_deemphasis_override)); + if (qib_compat_ddr_negotiate) { ppd->cpspec->ibdeltainprog = 1; ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd, diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c index 7ecc6061f1f4..f8dfd76be89f 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c @@ -629,6 +629,7 @@ static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow, { enum usnic_transport_type trans_type = qp_flow->trans_type; int err; + uint16_t port_num = 0; switch (trans_type) { case USNIC_TRANSPORT_ROCE_CUSTOM: @@ -637,9 +638,15 @@ static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow, case USNIC_TRANSPORT_IPV4_UDP: err = usnic_transport_sock_get_addr(qp_flow->udp.sock, NULL, NULL, - (uint16_t *) id); + &port_num); if (err) return err; + /* + * Copy port_num to stack first and then to *id, + * so that the short to int cast works for little + * and big endian systems. + */ + *id = port_num; break; default: usnic_err("Unsupported transport %u\n", trans_type); diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index 538822684d5b..334f34b1cd46 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c @@ -610,11 +610,12 @@ void iser_snd_completion(struct iser_tx_desc *tx_desc, ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE); kmem_cache_free(ig.desc_cache, tx_desc); + tx_desc = NULL; } atomic_dec(&ib_conn->post_send_buf_count); - if (tx_desc->type == ISCSI_TX_CONTROL) { + if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) { /* this arithmetic is legal by libiscsi dd_data allocation */ task = (void *) ((long)(void *)tx_desc - sizeof(struct iscsi_task)); diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index afe95674008b..ca37edef2791 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -652,9 +652,13 @@ static int iser_disconnected_handler(struct rdma_cm_id *cma_id) /* getting here when the state is UP means that the conn is being * * terminated asynchronously from the iSCSI layer's perspective. 
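The usnic change above fixes how a 16-bit UDP port is returned through an int pointer: writing the port through a (uint16_t *) cast lands in the wrong half of the int on big-endian machines, while an ordinary integer assignment from a local uint16_t is correct on either byte order. Illustration only (assumes a 32-bit int), not code from the patch:

#include <stdint.h>

static void illustrate(void)
{
	int id = 0;
	uint16_t port_num = 0x1234;

	*(uint16_t *)&id = port_num;	/* little endian: id == 0x00001234
					 * big endian:    id == 0x12340000 (wrong) */
	id = port_num;			/* either endianness: id == 0x1234 */
	(void)id;
}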
*/ if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, - ISER_CONN_TERMINATING)) - iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn, - ISCSI_ERR_CONN_FAILED); + ISER_CONN_TERMINATING)){ + if (ib_conn->iser_conn) + iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn, + ISCSI_ERR_CONN_FAILED); + else + iser_err("iscsi_iser connection isn't bound\n"); + } /* Complete the termination process if no posts are pending */ if (ib_conn->post_recv_buf_count == 0 && diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 2b161be3c1a3..d18d08a076e8 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -453,6 +453,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn) if (ret) { pr_err("Failed to create fastreg descriptor err=%d\n", ret); + kfree(fr_desc); goto err; } diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 520a7e5a490b..0e537d8d0e47 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -3666,9 +3666,9 @@ static ssize_t srpt_tpg_attrib_store_srp_max_rdma_size( unsigned long val; int ret; - ret = strict_strtoul(page, 0, &val); + ret = kstrtoul(page, 0, &val); if (ret < 0) { - pr_err("strict_strtoul() failed with ret: %d\n", ret); + pr_err("kstrtoul() failed with ret: %d\n", ret); return -EINVAL; } if (val > MAX_SRPT_RDMA_SIZE) { @@ -3706,9 +3706,9 @@ static ssize_t srpt_tpg_attrib_store_srp_max_rsp_size( unsigned long val; int ret; - ret = strict_strtoul(page, 0, &val); + ret = kstrtoul(page, 0, &val); if (ret < 0) { - pr_err("strict_strtoul() failed with ret: %d\n", ret); + pr_err("kstrtoul() failed with ret: %d\n", ret); return -EINVAL; } if (val > MAX_SRPT_RSP_SIZE) { @@ -3746,9 +3746,9 @@ static ssize_t srpt_tpg_attrib_store_srp_sq_size( unsigned long val; int ret; - ret = strict_strtoul(page, 0, &val); + ret = kstrtoul(page, 0, &val); if (ret < 0) { - pr_err("strict_strtoul() failed with ret: %d\n", ret); + pr_err("kstrtoul() failed with ret: %d\n", ret); return -EINVAL; } if (val > MAX_SRPT_SRQ_SIZE) { @@ -3793,7 +3793,7 @@ static ssize_t srpt_tpg_store_enable( unsigned long tmp; int ret; - ret = strict_strtoul(page, 0, &tmp); + ret = kstrtoul(page, 0, &tmp); if (ret < 0) { printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n"); return -EINVAL; diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 0c707e4f4eaf..a4c7306ff43d 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -210,7 +210,9 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2); #define GC_MARK_RECLAIMABLE 0 #define GC_MARK_DIRTY 1 #define GC_MARK_METADATA 2 -BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13); +#define GC_SECTORS_USED_SIZE 13 +#define MAX_GC_SECTORS_USED (~(~0ULL << GC_SECTORS_USED_SIZE)) +BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE); BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1); #include "journal.h" diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 4f6b5940e609..3f74b4b0747b 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -23,7 +23,7 @@ void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set) for (k = i->start; k < bset_bkey_last(i); k = next) { next = bkey_next(k); - printk(KERN_ERR "block %u key %zi/%u: ", set, + printk(KERN_ERR "block %u key %li/%u: ", set, (uint64_t *) k - i->d, i->keys); if (b->ops->key_dump) @@ -1185,9 +1185,12 @@ static void __btree_sort(struct 
btree_keys *b, struct btree_iter *iter, struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO, order); if (!out) { + struct page *outp; + BUG_ON(order > state->page_order); - out = page_address(mempool_alloc(state->pool, GFP_NOIO)); + outp = mempool_alloc(state->pool, GFP_NOIO); + out = page_address(outp); used_mempool = true; order = state->page_order; } diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 98cc0a810a36..5f9c2a665ca5 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -1167,7 +1167,7 @@ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k) /* guard against overflow */ SET_GC_SECTORS_USED(g, min_t(unsigned, GC_SECTORS_USED(g) + KEY_SIZE(k), - (1 << 14) - 1)); + MAX_GC_SECTORS_USED)); BUG_ON(!GC_SECTORS_USED(g)); } @@ -1805,7 +1805,7 @@ static bool btree_insert_key(struct btree *b, struct bkey *k, static size_t insert_u64s_remaining(struct btree *b) { - ssize_t ret = bch_btree_keys_u64s_remaining(&b->keys); + long ret = bch_btree_keys_u64s_remaining(&b->keys); /* * Might land in the middle of an existing extent and have to split it diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 72cd213f213f..5d5d031cf381 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -353,14 +353,14 @@ static void bch_data_insert_start(struct closure *cl) struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); struct bio *bio = op->bio, *n; - if (op->bypass) - return bch_data_invalidate(cl); - if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) { set_gc_sectors(op->c); wake_up_gc(op->c); } + if (op->bypass) + return bch_data_invalidate(cl); + /* * Journal writes are marked REQ_FLUSH; if the original write was a * flush, it'll wait on the journal write. 
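The bcache change above stops clamping GC_SECTORS_USED to the hard-coded (1 << 14) - 1 and instead derives the limit from the field's declared width: GC_SECTORS_USED occupies 13 bits of gc_mark, so the old clamp allowed values that silently truncate when stored (8192 becomes 0 and trips the BUG_ON right below the clamp). The arithmetic behind the new define:

/* 13-bit field, largest storable value:
 *   MAX_GC_SECTORS_USED = ~(~0ULL << 13) = (1 << 13) - 1 = 8191 (0x1fff)
 *
 * Old clamp: (1 << 14) - 1 = 16383; anything in [8192, 16383] loses its
 * top bit when packed into the 13-bit field, e.g. 8192 -> 0.
 */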
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index c6ab69333a6d..d8458d477a12 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -416,7 +416,7 @@ static int btree_bset_stats(struct btree_op *b_op, struct btree *b) return MAP_CONTINUE; } -int bch_bset_print_stats(struct cache_set *c, char *buf) +static int bch_bset_print_stats(struct cache_set *c, char *buf) { struct bset_stats_op op; int ret; diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index fd3a2a14b587..4a6ca1cb2e78 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1953,11 +1953,15 @@ static int process_checks(struct r1bio *r1_bio) for (i = 0; i < conf->raid_disks * 2; i++) { int j; int size; + int uptodate; struct bio *b = r1_bio->bios[i]; if (b->bi_end_io != end_sync_read) continue; - /* fixup the bio for reuse */ + /* fixup the bio for reuse, but preserve BIO_UPTODATE */ + uptodate = test_bit(BIO_UPTODATE, &b->bi_flags); bio_reset(b); + if (!uptodate) + clear_bit(BIO_UPTODATE, &b->bi_flags); b->bi_vcnt = vcnt; b->bi_iter.bi_size = r1_bio->sectors << 9; b->bi_iter.bi_sector = r1_bio->sector + @@ -1990,11 +1994,14 @@ static int process_checks(struct r1bio *r1_bio) int j; struct bio *pbio = r1_bio->bios[primary]; struct bio *sbio = r1_bio->bios[i]; + int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags); if (sbio->bi_end_io != end_sync_read) continue; + /* Now we can 'fixup' the BIO_UPTODATE flag */ + set_bit(BIO_UPTODATE, &sbio->bi_flags); - if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) { + if (uptodate) { for (j = vcnt; j-- ; ) { struct page *p, *s; p = pbio->bi_io_vec[j].bv_page; @@ -2009,7 +2016,7 @@ static int process_checks(struct r1bio *r1_bio) if (j >= 0) atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) - && test_bit(BIO_UPTODATE, &sbio->bi_flags))) { + && uptodate)) { /* No need to write to this device. 
*/ sbio->bi_end_io = NULL; rdev_dec_pending(conf->mirrors[i].rdev, mddev); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index f1feadeb7bb2..16f5c21963db 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5514,23 +5514,43 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks) return sectors * (raid_disks - conf->max_degraded); } +static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) +{ + safe_put_page(percpu->spare_page); + kfree(percpu->scribble); + percpu->spare_page = NULL; + percpu->scribble = NULL; +} + +static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) +{ + if (conf->level == 6 && !percpu->spare_page) + percpu->spare_page = alloc_page(GFP_KERNEL); + if (!percpu->scribble) + percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL); + + if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) { + free_scratch_buffer(conf, percpu); + return -ENOMEM; + } + + return 0; +} + static void raid5_free_percpu(struct r5conf *conf) { - struct raid5_percpu *percpu; unsigned long cpu; if (!conf->percpu) return; - get_online_cpus(); - for_each_possible_cpu(cpu) { - percpu = per_cpu_ptr(conf->percpu, cpu); - safe_put_page(percpu->spare_page); - kfree(percpu->scribble); - } #ifdef CONFIG_HOTPLUG_CPU unregister_cpu_notifier(&conf->cpu_notify); #endif + + get_online_cpus(); + for_each_possible_cpu(cpu) + free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); put_online_cpus(); free_percpu(conf->percpu); @@ -5557,15 +5577,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action, switch (action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: - if (conf->level == 6 && !percpu->spare_page) - percpu->spare_page = alloc_page(GFP_KERNEL); - if (!percpu->scribble) - percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL); - - if (!percpu->scribble || - (conf->level == 6 && !percpu->spare_page)) { - safe_put_page(percpu->spare_page); - kfree(percpu->scribble); + if (alloc_scratch_buffer(conf, percpu)) { pr_err("%s: failed memory allocation for cpu%ld\n", __func__, cpu); return notifier_from_errno(-ENOMEM); @@ -5573,10 +5585,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action, break; case CPU_DEAD: case CPU_DEAD_FROZEN: - safe_put_page(percpu->spare_page); - kfree(percpu->scribble); - percpu->spare_page = NULL; - percpu->scribble = NULL; + free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); break; default: break; @@ -5588,40 +5597,29 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action, static int raid5_alloc_percpu(struct r5conf *conf) { unsigned long cpu; - struct page *spare_page; - struct raid5_percpu __percpu *allcpus; - void *scribble; - int err; + int err = 0; - allcpus = alloc_percpu(struct raid5_percpu); - if (!allcpus) + conf->percpu = alloc_percpu(struct raid5_percpu); + if (!conf->percpu) return -ENOMEM; - conf->percpu = allcpus; + +#ifdef CONFIG_HOTPLUG_CPU + conf->cpu_notify.notifier_call = raid456_cpu_notify; + conf->cpu_notify.priority = 0; + err = register_cpu_notifier(&conf->cpu_notify); + if (err) + return err; +#endif get_online_cpus(); - err = 0; for_each_present_cpu(cpu) { - if (conf->level == 6) { - spare_page = alloc_page(GFP_KERNEL); - if (!spare_page) { - err = -ENOMEM; - break; - } - per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page; - } - scribble = kmalloc(conf->scribble_len, GFP_KERNEL); - if (!scribble) { - err = -ENOMEM; + err = alloc_scratch_buffer(conf, 
per_cpu_ptr(conf->percpu, cpu)); + if (err) { + pr_err("%s: failed memory allocation for cpu%ld\n", + __func__, cpu); break; } - per_cpu_ptr(conf->percpu, cpu)->scribble = scribble; } -#ifdef CONFIG_HOTPLUG_CPU - conf->cpu_notify.notifier_call = raid456_cpu_notify; - conf->cpu_notify.priority = 0; - if (err == 0) - err = register_cpu_notifier(&conf->cpu_notify); -#endif put_online_cpus(); return err; diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c index 8f8a6b327cdb..2c2c9cc75231 100644 --- a/drivers/misc/genwqe/card_dev.c +++ b/drivers/misc/genwqe/card_dev.c @@ -787,6 +787,7 @@ static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m) if (rc != 0) { dev_err(&pci_dev->dev, "[%s] genwqe_user_vmap rc=%d\n", __func__, rc); + kfree(dma_map); return rc; } diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 1ee2b9492a82..9b809cfc2899 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -908,7 +908,6 @@ void mei_cl_all_disconnect(struct mei_device *dev) list_for_each_entry_safe(cl, next, &dev->file_list, link) { cl->state = MEI_FILE_DISCONNECTED; cl->mei_flow_ctrl_creds = 0; - cl->read_cb = NULL; cl->timer_count = 0; } } @@ -942,8 +941,16 @@ void mei_cl_all_wakeup(struct mei_device *dev) void mei_cl_all_write_clear(struct mei_device *dev) { struct mei_cl_cb *cb, *next; + struct list_head *list; - list_for_each_entry_safe(cb, next, &dev->write_list.list, list) { + list = &dev->write_list.list; + list_for_each_entry_safe(cb, next, list, list) { + list_del(&cb->list); + mei_io_cb_free(cb); + } + + list = &dev->write_waiting_list.list; + list_for_each_entry_safe(cb, next, list, list) { list_del(&cb->list); mei_io_cb_free(cb); } diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c index 752ff873f891..7e1ef0ebbb80 100644 --- a/drivers/misc/mic/host/mic_virtio.c +++ b/drivers/misc/mic/host/mic_virtio.c @@ -156,7 +156,8 @@ static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov, static int _mic_virtio_copy(struct mic_vdev *mvdev, struct mic_copy_desc *copy) { - int ret = 0, iovcnt = copy->iovcnt; + int ret = 0; + u32 iovcnt = copy->iovcnt; struct iovec iov; struct iovec __user *u_iov = copy->iov; void __user *ubuf = NULL; diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index 357bbc54fe4b..3e049c13429c 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c @@ -197,7 +197,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, struct mmc_queue_req *mqrq_prev = &mq->mqrq[1]; if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) - limit = dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; + limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; mq->card = card; mq->queue = blk_init_queue(mmc_request_fn, lock); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index f342278539d5..494b888a6568 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -139,7 +139,7 @@ config MACVTAP This adds a specialized tap character device driver that is based on the MAC-VLAN network interface, called macvtap. A macvtap device can be added in the same way as a macvlan device, using 'type - macvlan', and then be accessed through the tap user space interface. + macvtap', and then be accessed through the tap user space interface. To compile this driver as a module, choose M here: the module will be called macvtap. 
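Several of the fixes above are integer-width corrections; the mmc queue hunk is the classic case: dma_max_pfn() yields an unsigned long, and on 32-bit builds shifting it by PAGE_SHIFT wraps before the result ever reaches the 64-bit limit, so the patch widens the operand first. A minimal user-space illustration of that hazard, using made-up values and names rather than the mmc ones:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint32_t max_pfn = 0x00200000;	/* a page-frame number that needs more than 32 bits once shifted */

	/* Shift performed in 32-bit arithmetic: the high bits are lost (wraps to 0 here). */
	uint64_t wrong = max_pfn << PAGE_SHIFT;

	/* Widen to 64 bits first, then shift: the intended byte limit survives. */
	uint64_t right = (uint64_t)max_pfn << PAGE_SHIFT;

	printf("wrong=%llu right=%llu\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}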
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index e9edd8473df6..e362ff720e6b 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -1797,8 +1797,6 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout) BOND_AD_INFO(bond).agg_select_timer = timeout; } -static u16 aggregator_identifier; - /** * bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures * @bond: bonding struct to work on @@ -1812,7 +1810,7 @@ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution) if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr), bond->dev->dev_addr)) { - aggregator_identifier = 0; + BOND_AD_INFO(bond).aggregator_identifier = 0; BOND_AD_INFO(bond).system.sys_priority = 0xFFFF; BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr); @@ -1881,7 +1879,7 @@ void bond_3ad_bind_slave(struct slave *slave) ad_initialize_agg(aggregator); aggregator->aggregator_mac_address = *((struct mac_addr *)bond->dev->dev_addr); - aggregator->aggregator_identifier = (++aggregator_identifier); + aggregator->aggregator_identifier = ++BOND_AD_INFO(bond).aggregator_identifier; aggregator->slave = slave; aggregator->is_active = 0; aggregator->num_of_ports = 0; diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h index 3b97fe487dca..bb03b1df2f3e 100644 --- a/drivers/net/bonding/bond_3ad.h +++ b/drivers/net/bonding/bond_3ad.h @@ -253,6 +253,7 @@ struct ad_system { struct ad_bond_info { struct ad_system system; /* 802.3ad system structure */ u32 agg_select_timer; /* Timer to select aggregator after all adapter's hand shakes */ + u16 aggregator_identifier; }; struct ad_slave_info { diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index ac4a1b88115e..afae7cab5cf6 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1538,9 +1538,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) bond_set_carrier(bond); if (USES_PRIMARY(bond->params.mode)) { + block_netpoll_tx(); write_lock_bh(&bond->curr_slave_lock); bond_select_active_slave(bond); write_unlock_bh(&bond->curr_slave_lock); + unblock_netpoll_tx(); } pr_info("%s: Enslaving %s as %s interface with %s link\n", @@ -1566,10 +1568,12 @@ err_detach: if (bond->primary_slave == new_slave) bond->primary_slave = NULL; if (bond->curr_active_slave == new_slave) { + block_netpoll_tx(); write_lock_bh(&bond->curr_slave_lock); bond_change_active_slave(bond, NULL); bond_select_active_slave(bond); write_unlock_bh(&bond->curr_slave_lock); + unblock_netpoll_tx(); } slave_disable_netpoll(new_slave); @@ -2858,9 +2862,12 @@ static int bond_slave_netdev_event(unsigned long event, pr_info("%s: Primary slave changed to %s, reselecting active slave\n", bond->dev->name, bond->primary_slave ? 
slave_dev->name : "none"); + + block_netpoll_tx(); write_lock_bh(&bond->curr_slave_lock); bond_select_active_slave(bond); write_unlock_bh(&bond->curr_slave_lock); + unblock_netpoll_tx(); break; case NETDEV_FEAT_CHANGE: bond_compute_features(bond); @@ -3683,7 +3690,7 @@ static inline int bond_slave_override(struct bonding *bond, static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv) + void *accel_priv, select_queue_fallback_t fallback) { /* * This helper function exists to help dev_pick_tx get the correct diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 5f997b9af54d..23f365510b58 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -14,7 +14,7 @@ #include <linux/errno.h> #include <linux/if.h> #include <linux/netdevice.h> -#include <linux/rwlock.h> +#include <linux/spinlock.h> #include <linux/rcupdate.h> #include <linux/ctype.h> #include <linux/inet.h> diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 6c859bba8b65..e77d11049747 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -473,6 +473,8 @@ static int kvaser_usb_get_card_info(struct kvaser_usb *dev) return err; dev->nchannels = msg.u.cardinfo.nchannels; + if (dev->nchannels > MAX_NET_DEVICES) + return -EINVAL; return 0; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 8363b9de5004..5ee13af78e53 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -1867,7 +1867,7 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) } u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv) + void *accel_priv, select_queue_fallback_t fallback) { struct bnx2x *bp = netdev_priv(dev); @@ -1889,7 +1889,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, } /* select a non-FCoE queue */ - return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp); + return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp); } void bnx2x_set_num_queues(struct bnx2x *bp) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 5135cc7f7b6f..ec02b15fba32 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -496,7 +496,7 @@ int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos); /* select_queue callback */ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv); + void *accel_priv, select_queue_fallback_t fallback); static inline void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp, diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index add05f14b38b..1642de78aac8 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -1939,6 +1939,7 @@ static void tulip_remove_one(struct pci_dev *pdev) pci_iounmap(pdev, tp->base_addr); free_netdev (dev); pci_release_regions (pdev); + pci_disable_device(pdev); /* pci_power_off (pdev, -1); */ } diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index d4782b42401b..903362a7b584 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1778,8 +1778,6 @@ fec_enet_open(struct net_device 
*ndev) struct fec_enet_private *fep = netdev_priv(ndev); int ret; - napi_enable(&fep->napi); - /* I should reset the ring buffers here, but I don't yet know * a simple way to do that. */ @@ -1794,6 +1792,8 @@ fec_enet_open(struct net_device *ndev) fec_enet_free_buffers(ndev); return ret; } + + napi_enable(&fep->napi); phy_start(fep->phy_dev); netif_start_queue(ndev); fep->opened = 1; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 6d4ada72dfd0..18076c4178b4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6881,7 +6881,7 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) } static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv) + void *accel_priv, select_queue_fallback_t fallback) { struct ixgbe_fwd_adapter *fwd_adapter = accel_priv; #ifdef IXGBE_FCOE @@ -6907,7 +6907,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) break; default: - return __netdev_pick_tx(dev, skb); + return fallback(dev, skb); } f = &adapter->ring_feature[RING_F_FCOE]; @@ -6920,7 +6920,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, return txq + f->offset; #else - return __netdev_pick_tx(dev, skb); + return fallback(dev, skb); #endif } diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 8f9266c64c75..fd4b6aecf6ee 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -619,7 +619,7 @@ ltq_etop_set_multicast_list(struct net_device *dev) static u16 ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv) + void *accel_priv, select_queue_fallback_t fallback) { /* we are currently only using the first queue */ return 0; diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index 6300fd27f2db..68e6a6613e9a 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -43,12 +43,12 @@ config MVMDIO This driver is used by the MV643XX_ETH and MVNETA drivers. config MVNETA - tristate "Marvell Armada 370/XP network interface support" - depends on MACH_ARMADA_370_XP + tristate "Marvell Armada 370/38x/XP network interface support" + depends on PLAT_ORION select MVMDIO ---help--- This driver supports the network interface units in the - Marvell ARMADA XP and ARMADA 370 SoC family. + Marvell ARMADA XP, ARMADA 370 and ARMADA 38x SoC family. 
Note that this driver is distinct from the mv643xx_eth driver, which should be used for the older Marvell SoCs diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 8e8a7eb43a2c..13457032d15f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -629,7 +629,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk } u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv) + void *accel_priv, select_queue_fallback_t fallback) { struct mlx4_en_priv *priv = netdev_priv(dev); u16 rings_p_up = priv->num_tx_rings_p_up; @@ -641,7 +641,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, if (vlan_tx_tag_present(skb)) up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT; - return __netdev_pick_tx(dev, skb) % rings_p_up + up * rings_p_up; + return fallback(dev, skb) % rings_p_up + up * rings_p_up; } static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt) diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 3af04c3f42ea..9ca223bc90fc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -723,7 +723,7 @@ int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); void mlx4_en_tx_irq(struct mlx4_cq *mcq); u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv); + void *accel_priv, select_queue_fallback_t fallback); netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 157fe8df2c3e..8ff57e8e3e91 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -4,5 +4,5 @@ config MLX5_CORE tristate - depends on PCI && X86 + depends on PCI default n diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index e2f202e3932f..f2d7c702c77f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -37,6 +37,17 @@ config DWMAC_SUNXI stmmac device driver. This driver is used for A20/A31 GMAC ethernet controller. +config DWMAC_STI + bool "STi GMAC support" + depends on STMMAC_PLATFORM && ARCH_STI + default y + ---help--- + Support for ethernet controller on STi SOCs. + + This selects STi SoC glue layer support for the stmmac + device driver. This driver is used on for the STi series + SOCs GMAC ethernet controller. 
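The DWMAC_STI entry above enables the dwmac-sti glue driver added just below, which selects the TX retime clock by rewriting a few bits of a syscon register through regmap_update_bits(). As a rough sketch of that read-modify-write pattern (the mask and bit values echo the driver; the helper, register contents and printed result are illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the regmap_update_bits() idiom: change only the bits
 * covered by 'mask', leave the rest of the register untouched. */
static uint32_t update_bits(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | (val & mask);
}

#define TX_RETIME_SRC_MASK	(0x7u << 6)	/* bits [8:6] of the glue register */
#define ETH_SEL_TX_RETIME_CLK	(1u << 8)

int main(void)
{
	uint32_t sysconf = 0x000001ff;	/* pretend current register contents */

	/* Select the retime clock: clears bits 7:6, keeps bit 8 set. */
	sysconf = update_bits(sysconf, TX_RETIME_SRC_MASK, ETH_SEL_TX_RETIME_CLK);
	printf("0x%08x\n", sysconf);	/* prints 0x0000013f */
	return 0;
}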
+ config STMMAC_PCI bool "STMMAC PCI bus support" depends on STMMAC_ETH && PCI diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index ecadecea79b2..dcef28775dad 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -2,6 +2,7 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o +stmmac-$(CONFIG_DWMAC_STI) += dwmac-sti.o stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c new file mode 100644 index 000000000000..552bbc17863c --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c @@ -0,0 +1,330 @@ +/** + * dwmac-sti.c - STMicroelectronics DWMAC Specific Glue layer + * + * Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited + * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com> + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/platform_device.h> +#include <linux/stmmac.h> +#include <linux/phy.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> +#include <linux/clk.h> +#include <linux/of.h> +#include <linux/of_net.h> + +/** + * STi GMAC glue logic. + * -------------------- + * + * _ + * | \ + * --------|0 \ ETH_SEL_INTERNAL_NOTEXT_PHYCLK + * phyclk | |___________________________________________ + * | | | (phyclk-in) + * --------|1 / | + * int-clk |_ / | + * | _ + * | | \ + * |_______|1 \ ETH_SEL_TX_RETIME_CLK + * | |___________________________ + * | | (tx-retime-clk) + * _______|0 / + * | |_ / + * _ | + * | \ | + * --------|0 \ | + * clk_125 | |__| + * | | ETH_SEL_TXCLK_NOT_CLK125 + * --------|1 / + * txclk |_ / + * + * + * ETH_SEL_INTERNAL_NOTEXT_PHYCLK is valid only for RMII where PHY can + * generate 50MHz clock or MAC can generate it. + * This bit is configured by "st,ext-phyclk" property. + * + * ETH_SEL_TXCLK_NOT_CLK125 is only valid for gigabit modes, where the 125Mhz + * clock either comes from clk-125 pin or txclk pin. This configuration is + * totally driven by the board wiring. This bit is configured by + * "st,tx-retime-src" property. + * + * TXCLK configuration is different for different phy interface modes + * and changes according to link speed in modes like RGMII. + * + * Below table summarizes the clock requirement and clock sources for + * supported phy interface modes with link speeds. 
+ * ________________________________________________ + *| PHY_MODE | 1000 Mbit Link | 100 Mbit Link | + * ------------------------------------------------ + *| MII | n/a | 25Mhz | + *| | | txclk | + * ------------------------------------------------ + *| GMII | 125Mhz | 25Mhz | + *| | clk-125/txclk | txclk | + * ------------------------------------------------ + *| RGMII | 125Mhz | 25Mhz | + *| | clk-125/txclk | clkgen | + * ------------------------------------------------ + *| RMII | n/a | 25Mhz | + *| | |clkgen/phyclk-in | + * ------------------------------------------------ + * + * TX lines are always retimed with a clk, which can vary depending + * on the board configuration. Below is the table of these bits + * in eth configuration register depending on source of retime clk. + * + *--------------------------------------------------------------- + * src | tx_rt_clk | int_not_ext_phyclk | txclk_n_clk125| + *--------------------------------------------------------------- + * txclk | 0 | n/a | 1 | + *--------------------------------------------------------------- + * ck_125| 0 | n/a | 0 | + *--------------------------------------------------------------- + * phyclk| 1 | 0 | n/a | + *--------------------------------------------------------------- + * clkgen| 1 | 1 | n/a | + *--------------------------------------------------------------- + */ + + /* Register definition */ + + /* 3 bits [8:6] + * [6:6] ETH_SEL_TXCLK_NOT_CLK125 + * [7:7] ETH_SEL_INTERNAL_NOTEXT_PHYCLK + * [8:8] ETH_SEL_TX_RETIME_CLK + * + */ + +#define TX_RETIME_SRC_MASK GENMASK(8, 6) +#define ETH_SEL_TX_RETIME_CLK BIT(8) +#define ETH_SEL_INTERNAL_NOTEXT_PHYCLK BIT(7) +#define ETH_SEL_TXCLK_NOT_CLK125 BIT(6) + +#define ENMII_MASK GENMASK(5, 5) +#define ENMII BIT(5) + +/** + * 3 bits [4:2] + * 000-GMII/MII + * 001-RGMII + * 010-SGMII + * 100-RMII +*/ +#define MII_PHY_SEL_MASK GENMASK(4, 2) +#define ETH_PHY_SEL_RMII BIT(4) +#define ETH_PHY_SEL_SGMII BIT(3) +#define ETH_PHY_SEL_RGMII BIT(2) +#define ETH_PHY_SEL_GMII 0x0 +#define ETH_PHY_SEL_MII 0x0 + +#define IS_PHY_IF_MODE_RGMII(iface) (iface == PHY_INTERFACE_MODE_RGMII || \ + iface == PHY_INTERFACE_MODE_RGMII_ID || \ + iface == PHY_INTERFACE_MODE_RGMII_RXID || \ + iface == PHY_INTERFACE_MODE_RGMII_TXID) + +#define IS_PHY_IF_MODE_GBIT(iface) (IS_PHY_IF_MODE_RGMII(iface) || \ + iface == PHY_INTERFACE_MODE_GMII) + +struct sti_dwmac { + int interface; + bool ext_phyclk; + bool is_tx_retime_src_clk_125; + struct clk *clk; + int reg; + struct device *dev; + struct regmap *regmap; +}; + +static u32 phy_intf_sels[] = { + [PHY_INTERFACE_MODE_MII] = ETH_PHY_SEL_MII, + [PHY_INTERFACE_MODE_GMII] = ETH_PHY_SEL_GMII, + [PHY_INTERFACE_MODE_RGMII] = ETH_PHY_SEL_RGMII, + [PHY_INTERFACE_MODE_RGMII_ID] = ETH_PHY_SEL_RGMII, + [PHY_INTERFACE_MODE_SGMII] = ETH_PHY_SEL_SGMII, + [PHY_INTERFACE_MODE_RMII] = ETH_PHY_SEL_RMII, +}; + +enum { + TX_RETIME_SRC_NA = 0, + TX_RETIME_SRC_TXCLK = 1, + TX_RETIME_SRC_CLK_125, + TX_RETIME_SRC_PHYCLK, + TX_RETIME_SRC_CLKGEN, +}; + +static const char *const tx_retime_srcs[] = { + [TX_RETIME_SRC_NA] = "", + [TX_RETIME_SRC_TXCLK] = "txclk", + [TX_RETIME_SRC_CLK_125] = "clk_125", + [TX_RETIME_SRC_PHYCLK] = "phyclk", + [TX_RETIME_SRC_CLKGEN] = "clkgen", +}; + +static u32 tx_retime_val[] = { + [TX_RETIME_SRC_TXCLK] = ETH_SEL_TXCLK_NOT_CLK125, + [TX_RETIME_SRC_CLK_125] = 0x0, + [TX_RETIME_SRC_PHYCLK] = ETH_SEL_TX_RETIME_CLK, + [TX_RETIME_SRC_CLKGEN] = ETH_SEL_TX_RETIME_CLK | + ETH_SEL_INTERNAL_NOTEXT_PHYCLK, +}; + +static void setup_retime_src(struct sti_dwmac *dwmac, u32 
spd) +{ + u32 src = 0, freq = 0; + + if (spd == SPEED_100) { + if (dwmac->interface == PHY_INTERFACE_MODE_MII || + dwmac->interface == PHY_INTERFACE_MODE_GMII) { + src = TX_RETIME_SRC_TXCLK; + } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) { + if (dwmac->ext_phyclk) { + src = TX_RETIME_SRC_PHYCLK; + } else { + src = TX_RETIME_SRC_CLKGEN; + freq = 50000000; + } + + } else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) { + src = TX_RETIME_SRC_CLKGEN; + freq = 25000000; + } + + if (src == TX_RETIME_SRC_CLKGEN && dwmac->clk) + clk_set_rate(dwmac->clk, freq); + + } else if (spd == SPEED_1000) { + if (dwmac->is_tx_retime_src_clk_125) + src = TX_RETIME_SRC_CLK_125; + else + src = TX_RETIME_SRC_TXCLK; + } + + regmap_update_bits(dwmac->regmap, dwmac->reg, + TX_RETIME_SRC_MASK, tx_retime_val[src]); +} + +static void sti_dwmac_exit(struct platform_device *pdev, void *priv) +{ + struct sti_dwmac *dwmac = priv; + + if (dwmac->clk) + clk_disable_unprepare(dwmac->clk); +} + +static void sti_fix_mac_speed(void *priv, unsigned int spd) +{ + struct sti_dwmac *dwmac = priv; + + setup_retime_src(dwmac, spd); + + return; +} + +static int sti_dwmac_parse_data(struct sti_dwmac *dwmac, + struct platform_device *pdev) +{ + struct resource *res; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct regmap *regmap; + int err; + + if (!np) + return -EINVAL; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-ethconf"); + if (!res) + return -ENODATA; + + regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon"); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + dwmac->dev = dev; + dwmac->interface = of_get_phy_mode(np); + dwmac->regmap = regmap; + dwmac->reg = res->start; + dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk"); + dwmac->is_tx_retime_src_clk_125 = false; + + if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) { + const char *rs; + + err = of_property_read_string(np, "st,tx-retime-src", &rs); + if (err < 0) { + dev_err(dev, "st,tx-retime-src not specified\n"); + return err; + } + + if (!strcasecmp(rs, "clk_125")) + dwmac->is_tx_retime_src_clk_125 = true; + } + + dwmac->clk = devm_clk_get(dev, "sti-ethclk"); + + if (IS_ERR(dwmac->clk)) + dwmac->clk = NULL; + + return 0; +} + +static int sti_dwmac_init(struct platform_device *pdev, void *priv) +{ + struct sti_dwmac *dwmac = priv; + struct regmap *regmap = dwmac->regmap; + int iface = dwmac->interface; + u32 reg = dwmac->reg; + u32 val, spd; + + if (dwmac->clk) + clk_prepare_enable(dwmac->clk); + + regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, phy_intf_sels[iface]); + + val = (iface == PHY_INTERFACE_MODE_REVMII) ? 
0 : ENMII; + regmap_update_bits(regmap, reg, ENMII_MASK, val); + + if (IS_PHY_IF_MODE_GBIT(iface)) + spd = SPEED_1000; + else + spd = SPEED_100; + + setup_retime_src(dwmac, spd); + + return 0; +} + +static void *sti_dwmac_setup(struct platform_device *pdev) +{ + struct sti_dwmac *dwmac; + int ret; + + dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); + if (!dwmac) + return ERR_PTR(-ENOMEM); + + ret = sti_dwmac_parse_data(dwmac, pdev); + if (ret) { + dev_err(&pdev->dev, "Unable to parse OF data\n"); + return ERR_PTR(ret); + } + + return dwmac; +} + +const struct stmmac_of_data sti_gmac_data = { + .fix_mac_speed = sti_fix_mac_speed, + .setup = sti_dwmac_setup, + .init = sti_dwmac_init, + .exit = sti_dwmac_exit, +}; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index d9af26ed58ee..f9e60d7918c4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -133,6 +133,9 @@ bool stmmac_eee_init(struct stmmac_priv *priv); #ifdef CONFIG_DWMAC_SUNXI extern const struct stmmac_of_data sun7i_gmac_data; #endif +#ifdef CONFIG_DWMAC_STI +extern const struct stmmac_of_data sti_gmac_data; +#endif extern struct platform_driver stmmac_pltfr_driver; static inline int stmmac_register_platform(void) { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 5884a7d2063b..c61bc72b8e90 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -33,6 +33,11 @@ static const struct of_device_id stmmac_dt_ids[] = { #ifdef CONFIG_DWMAC_SUNXI { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data}, #endif +#ifdef CONFIG_DWMAC_STI + { .compatible = "st,stih415-dwmac", .data = &sti_gmac_data}, + { .compatible = "st,stih416-dwmac", .data = &sti_gmac_data}, + { .compatible = "st,stih127-dwmac", .data = &sti_gmac_data}, +#endif /* SoC specific glue layers should come before generic bindings */ { .compatible = "st,spear600-gmac"}, { .compatible = "snps,dwmac-3.610"}, diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 39d12535c3c6..0b6a2802d51b 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -554,7 +554,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) * common for both the interface as the interface shares * the same hardware resource. 
*/ - for (i = 0; i <= priv->data.slaves; i++) + for (i = 0; i < priv->data.slaves; i++) if (priv->slaves[i].ndev->flags & IFF_PROMISC) flag = true; @@ -578,7 +578,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) unsigned long timeout = jiffies + HZ; /* Disable Learn for all ports */ - for (i = 0; i <= priv->data.slaves; i++) { + for (i = 0; i < priv->data.slaves; i++) { cpsw_ale_control_set(ale, i, ALE_PORT_NOLEARN, 1); cpsw_ale_control_set(ale, i, @@ -606,7 +606,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0); /* Enable Learn for all ports */ - for (i = 0; i <= priv->data.slaves; i++) { + for (i = 0; i < priv->data.slaves; i++) { cpsw_ale_control_set(ale, i, ALE_PORT_NOLEARN, 0); cpsw_ale_control_set(ale, i, @@ -1892,6 +1892,11 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); slave_data->phy_if = of_get_phy_mode(slave_node); + if (slave_data->phy_if < 0) { + pr_err("Missing or malformed slave[%d] phy-mode property\n", + i); + return slave_data->phy_if; + } if (data->dual_emac) { if (of_property_read_u32(slave_node, "dual_emac_res_vlan", diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 023237a65720..17503da9f7a5 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -2071,7 +2071,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) /* Return subqueue id on this core (one per core). */ static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv) + void *accel_priv, select_queue_fallback_t fallback) { return smp_processor_id(); } diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 1ec65feebb9e..4bfdf8c7ada0 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -26,6 +26,7 @@ #include <linux/netdevice.h> #include <linux/of_mdio.h> #include <linux/of_platform.h> +#include <linux/of_irq.h> #include <linux/of_address.h> #include <linux/skbuff.h> #include <linux/spinlock.h> @@ -600,7 +601,8 @@ static void axienet_start_xmit_done(struct net_device *ndev) size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; packets++; - lp->tx_bd_ci = ++lp->tx_bd_ci % TX_BD_NUM; + ++lp->tx_bd_ci; + lp->tx_bd_ci %= TX_BD_NUM; cur_p = &lp->tx_bd_v[lp->tx_bd_ci]; status = cur_p->status; } @@ -686,7 +688,8 @@ static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) skb_headlen(skb), DMA_TO_DEVICE); for (ii = 0; ii < num_frag; ii++) { - lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM; + ++lp->tx_bd_tail; + lp->tx_bd_tail %= TX_BD_NUM; cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; frag = &skb_shinfo(skb)->frags[ii]; cur_p->phys = dma_map_single(ndev->dev.parent, @@ -702,7 +705,8 @@ static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail; /* Start the transfer */ axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p); - lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM; + ++lp->tx_bd_tail; + lp->tx_bd_tail %= TX_BD_NUM; return NETDEV_TX_OK; } @@ -774,7 +778,8 @@ static void axienet_recv(struct net_device *ndev) cur_p->status = 0; cur_p->sw_id_offset = (u32) new_skb; - lp->rx_bd_ci = ++lp->rx_bd_ci % RX_BD_NUM; + ++lp->rx_bd_ci; + lp->rx_bd_ci %= RX_BD_NUM; cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; } diff --git 
a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 1eadc136a372..bcd2df2f406a 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -88,8 +88,12 @@ static int netvsc_open(struct net_device *net) { struct net_device_context *net_device_ctx = netdev_priv(net); struct hv_device *device_obj = net_device_ctx->device_ctx; + struct netvsc_device *nvdev; + struct rndis_device *rdev; int ret = 0; + netif_carrier_off(net); + /* Open up the device */ ret = rndis_filter_open(device_obj); if (ret != 0) { @@ -99,6 +103,11 @@ static int netvsc_open(struct net_device *net) netif_start_queue(net); + nvdev = hv_get_drvdata(device_obj); + rdev = nvdev->extension; + if (!rdev->link_state) + netif_carrier_on(net); + return ret; } @@ -229,23 +238,24 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj, struct net_device *net; struct net_device_context *ndev_ctx; struct netvsc_device *net_device; + struct rndis_device *rdev; net_device = hv_get_drvdata(device_obj); + rdev = net_device->extension; + + rdev->link_state = status != 1; + net = net_device->ndev; - if (!net) { - netdev_err(net, "got link status but net device " - "not initialized yet\n"); + if (!net || net->reg_state != NETREG_REGISTERED) return; - } + ndev_ctx = netdev_priv(net); if (status == 1) { - netif_carrier_on(net); - ndev_ctx = netdev_priv(net); schedule_delayed_work(&ndev_ctx->dwork, 0); schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20)); } else { - netif_carrier_off(net); + schedule_delayed_work(&ndev_ctx->dwork, 0); } } @@ -388,17 +398,35 @@ static const struct net_device_ops device_ops = { * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add * another netif_notify_peers() into a delayed work, otherwise GARP packet * will not be sent after quick migration, and cause network disconnection. + * Also, we update the carrier status here. 
*/ -static void netvsc_send_garp(struct work_struct *w) +static void netvsc_link_change(struct work_struct *w) { struct net_device_context *ndev_ctx; struct net_device *net; struct netvsc_device *net_device; + struct rndis_device *rdev; + bool notify; + + rtnl_lock(); ndev_ctx = container_of(w, struct net_device_context, dwork.work); net_device = hv_get_drvdata(ndev_ctx->device_ctx); + rdev = net_device->extension; net = net_device->ndev; - netdev_notify_peers(net); + + if (rdev->link_state) { + netif_carrier_off(net); + notify = false; + } else { + netif_carrier_on(net); + notify = true; + } + + rtnl_unlock(); + + if (notify) + netdev_notify_peers(net); } @@ -414,13 +442,10 @@ static int netvsc_probe(struct hv_device *dev, if (!net) return -ENOMEM; - /* Set initial state */ - netif_carrier_off(net); - net_device_ctx = netdev_priv(net); net_device_ctx->device_ctx = dev; hv_set_drvdata(dev, net); - INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp); + INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); INIT_WORK(&net_device_ctx->work, do_set_multicast); net->netdev_ops = &device_ops; @@ -443,8 +468,6 @@ static int netvsc_probe(struct hv_device *dev, } memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); - netif_carrier_on(net); - ret = register_netdev(net); if (ret != 0) { pr_err("Unable to register netdev.\n"); diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c index 177441afeb96..24b6dddd7f2f 100644 --- a/drivers/net/irda/irtty-sir.c +++ b/drivers/net/irda/irtty-sir.c @@ -522,7 +522,6 @@ static void irtty_close(struct tty_struct *tty) sirdev_put_instance(priv->dev); /* Stop tty */ - irtty_stop_receiver(tty, TRUE); clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); if (tty->ops->stop) tty->ops->stop(tty); diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 25685e3eb472..44227c25a276 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -872,14 +872,15 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, dev->priv_flags |= IFF_MACVLAN; err = netdev_upper_dev_link(lowerdev, dev); if (err) - goto destroy_port; - + goto unregister_netdev; list_add_tail_rcu(&vlan->list, &port->vlans); netif_stacked_transfer_operstate(lowerdev, dev); return 0; +unregister_netdev: + unregister_netdevice(dev); destroy_port: port->count -= 1; if (!port->count) diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 9414fa272160..98e7cbf720a5 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -1006,11 +1006,6 @@ static int dp83640_probe(struct phy_device *phydev) } else list_add_tail(&dp83640->list, &clock->phylist); - if (clock->chosen && !list_empty(&clock->phylist)) - recalibrate(clock); - else - enable_broadcast(dp83640->phydev, clock->page, 1); - dp83640_clock_put(clock); return 0; @@ -1063,6 +1058,14 @@ static void dp83640_remove(struct phy_device *phydev) static int dp83640_config_init(struct phy_device *phydev) { + struct dp83640_private *dp83640 = phydev->priv; + struct dp83640_clock *clock = dp83640->clock; + + if (clock->chosen && !list_empty(&clock->phylist)) + recalibrate(clock); + else + enable_broadcast(phydev, clock->page, 1); + enable_status_frames(phydev, true); ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE); return 0; diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index adb46de7c90d..aea92f02401b 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1642,7 +1642,7 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev) } 
static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv) + void *accel_priv, select_queue_fallback_t fallback) { /* * This helper function exists to help dev_pick_tx get the correct diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 44c4db8450f0..8fe9cb7d0f72 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -366,7 +366,7 @@ static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash) * hope the rxq no. may help here. */ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv) + void *accel_priv, select_queue_fallback_t fallback) { struct tun_struct *tun = netdev_priv(dev); struct tun_flow_entry *e; diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 409499fdb157..7e7269fd3707 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -296,7 +296,6 @@ config USB_NET_SR9800 tristate "CoreChip-sz SR9800 based USB 2.0 10/100 ethernet devices" depends on USB_USBNET select CRC32 - default y ---help--- Say Y if you want to use one of the following 100Mbps USB Ethernet device based on the CoreChip-sz SR9800 chip. diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 9765a7d4766d..5d194093f3e1 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -917,7 +917,8 @@ static const struct driver_info ax88178_info = { .status = asix_status, .link_reset = ax88178_link_reset, .reset = ax88178_reset, - .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | + FLAG_MULTI_PACKET, .rx_fixup = asix_rx_fixup_common, .tx_fixup = asix_tx_fixup, }; diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index d6f64dad05bc..955df81a4358 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1118,6 +1118,10 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) u16 hdr_off; u32 *pkt_hdr; + /* This check is no longer done by usbnet */ + if (skb->len < dev->net->hard_header_len) + return 0; + skb_trim(skb, skb->len - 4); memcpy(&rx_hdr, skb_tail_pointer(skb), 4); le32_to_cpus(&rx_hdr); diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c index e4a8a93fbaf7..1cc24e6f23e2 100644 --- a/drivers/net/usb/gl620a.c +++ b/drivers/net/usb/gl620a.c @@ -84,6 +84,10 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb) u32 size; u32 count; + /* This check is no longer done by usbnet */ + if (skb->len < dev->net->hard_header_len) + return 0; + header = (struct gl_header *) skb->data; // get the packet count of the received skb diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c index a305a7b2dae6..82d844a8ebd0 100644 --- a/drivers/net/usb/mcs7830.c +++ b/drivers/net/usb/mcs7830.c @@ -526,8 +526,9 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { u8 status; - if (skb->len == 0) { - dev_err(&dev->udev->dev, "unexpected empty rx frame\n"); + /* This check is no longer done by usbnet */ + if (skb->len < dev->net->hard_header_len) { + dev_err(&dev->udev->dev, "unexpected tiny rx frame\n"); return 0; } diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c index 0a85d9227775..4cbdb1307f3e 100644 --- a/drivers/net/usb/net1080.c +++ b/drivers/net/usb/net1080.c @@ -364,6 +364,10 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb) struct nc_trailer *trailer; u16 hdr_len, packet_len; + /* This check is no longer done by usbnet */ 
+ if (skb->len < dev->net->hard_header_len) + return 0; + if (!(skb->len & 0x01)) { netdev_dbg(dev->net, "rx framesize %d range %d..%d mtu %d\n", skb->len, dev->net->hard_header_len, dev->hard_mtu, diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index ff5c87128ffe..313cb6cd4848 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -80,10 +80,10 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { __be16 proto; - /* usbnet rx_complete guarantees that skb->len is at least - * hard_header_len, so we can inspect the dest address without - * checking skb->len - */ + /* This check is no longer done by usbnet */ + if (skb->len < dev->net->hard_header_len) + return 0; + switch (skb->data[0] & 0xf0) { case 0x40: proto = htons(ETH_P_IP); @@ -732,6 +732,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */ {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ + {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */ /* 4. Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index a48bc0f20c1a..524a47a28120 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -492,6 +492,10 @@ EXPORT_SYMBOL_GPL(rndis_unbind); */ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { + /* This check is no longer done by usbnet */ + if (skb->len < dev->net->hard_header_len) + return 0; + /* peripheral may have batched packets to us... */ while (likely(skb->len)) { struct rndis_data_hdr *hdr = (void *)skb->data; diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index f17b9e02dd34..d9e7892262fa 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -2106,6 +2106,10 @@ static void smsc75xx_rx_csum_offload(struct usbnet *dev, struct sk_buff *skb, static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { + /* This check is no longer done by usbnet */ + if (skb->len < dev->net->hard_header_len) + return 0; + while (skb->len > 0) { u32 rx_cmd_a, rx_cmd_b, align_count, size; struct sk_buff *ax_skb; diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 8dd54a0f7b29..424db65e4396 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1723,6 +1723,10 @@ static void smsc95xx_rx_csum_offload(struct sk_buff *skb) static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { + /* This check is no longer done by usbnet */ + if (skb->len < dev->net->hard_header_len) + return 0; + while (skb->len > 0) { u32 header, align_count; struct sk_buff *ax_skb; diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c index 4175eb9fdeca..b94a0fbb8b3b 100644 --- a/drivers/net/usb/sr9800.c +++ b/drivers/net/usb/sr9800.c @@ -63,6 +63,10 @@ static int sr_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { int offset = 0; + /* This check is no longer done by usbnet */ + if (skb->len < dev->net->hard_header_len) + return 0; + while (offset + sizeof(u32) < skb->len) { struct sk_buff *sr_skb; u16 size; @@ -823,7 +827,7 @@ static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf) dev->rx_urb_size = SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].size; } - netdev_dbg(dev->net, "%s : setting rx_urb_size with : %ld\n", __func__, + netdev_dbg(dev->net, "%s : setting rx_urb_size with : %zu\n", __func__, 
dev->rx_urb_size); return 0; diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 4671da755e7b..dd10d5817d2a 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -542,17 +542,19 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb) } // else network stack removes extra byte if we forced a short packet - if (skb->len) { - /* all data was already cloned from skb inside the driver */ - if (dev->driver_info->flags & FLAG_MULTI_PACKET) - dev_kfree_skb_any(skb); - else - usbnet_skb_return(dev, skb); + /* all data was already cloned from skb inside the driver */ + if (dev->driver_info->flags & FLAG_MULTI_PACKET) + goto done; + + if (skb->len < ETH_HLEN) { + dev->net->stats.rx_errors++; + dev->net->stats.rx_length_errors++; + netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len); + } else { + usbnet_skb_return(dev, skb); return; } - netif_dbg(dev, rx_err, dev->net, "drop\n"); - dev->net->stats.rx_errors++; done: skb_queue_tail(&dev->done, skb); } @@ -574,13 +576,6 @@ static void rx_complete (struct urb *urb) switch (urb_status) { /* success */ case 0: - if (skb->len < dev->net->hard_header_len) { - state = rx_cleanup; - dev->net->stats.rx_errors++; - dev->net->stats.rx_length_errors++; - netif_dbg(dev, rx_err, dev->net, - "rx length %d\n", skb->len); - } break; /* stalls need manual reset. this is rare ... except that diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c index d6bc7cb61bfb..1a2973b7acf2 100644 --- a/drivers/net/wireless/ath/ath5k/phy.c +++ b/drivers/net/wireless/ath/ath5k/phy.c @@ -110,7 +110,7 @@ ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band) ath5k_hw_reg_write(ah, 0x00010000, AR5K_PHY(0x20)); if (ah->ah_version == AR5K_AR5210) { - srev = ath5k_hw_reg_read(ah, AR5K_PHY(256) >> 28) & 0xf; + srev = (ath5k_hw_reg_read(ah, AR5K_PHY(256)) >> 28) & 0xf; ret = (u16)ath5k_hw_bitswap(srev, 4) + 1; } else { srev = (ath5k_hw_reg_read(ah, AR5K_PHY(0x100)) >> 24) & 0xff; diff --git a/drivers/net/wireless/hostap/hostap_proc.c b/drivers/net/wireless/hostap/hostap_proc.c index aa7ad3a7a69b..4e5c0f8c9496 100644 --- a/drivers/net/wireless/hostap/hostap_proc.c +++ b/drivers/net/wireless/hostap/hostap_proc.c @@ -496,7 +496,7 @@ void hostap_init_proc(local_info_t *local) void hostap_remove_proc(local_info_t *local) { - remove_proc_subtree(local->ddev->name, hostap_proc); + proc_remove(local->proc); } diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c index c24d1d3d55f6..73086c1629ca 100644 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c @@ -696,6 +696,24 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, return ret; } +static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg) +{ + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) + return false; + return true; +} + +static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg) +{ + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) + return false; + if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG) + return true; + + /* disabled by default */ + return false; +} + static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, @@ -717,7 +735,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, switch (action) { case IEEE80211_AMPDU_RX_START: - if (iwlwifi_mod_params.disable_11n & 
IWL_DISABLE_HT_RXAGG) + if (!iwl_enable_rx_ampdu(priv->cfg)) break; IWL_DEBUG_HT(priv, "start Rx\n"); ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn); @@ -729,7 +747,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, case IEEE80211_AMPDU_TX_START: if (!priv->trans->ops->txq_enable) break; - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) + if (!iwl_enable_tx_ampdu(priv->cfg)) break; IWL_DEBUG_HT(priv, "start Tx\n"); ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn); diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c index c3728163be46..75103554cd63 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c @@ -1286,7 +1286,7 @@ module_param_named(swcrypto, iwlwifi_mod_params.sw_crypto, int, S_IRUGO); MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO); MODULE_PARM_DESC(11n_disable, - "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX"); + "disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX"); module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K, int, S_IRUGO); MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)"); diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h index 0a84ade7edac..b29075c3da8e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h @@ -79,9 +79,12 @@ enum iwl_power_level { IWL_POWER_NUM }; -#define IWL_DISABLE_HT_ALL BIT(0) -#define IWL_DISABLE_HT_TXAGG BIT(1) -#define IWL_DISABLE_HT_RXAGG BIT(2) +enum iwl_disable_11n { + IWL_DISABLE_HT_ALL = BIT(0), + IWL_DISABLE_HT_TXAGG = BIT(1), + IWL_DISABLE_HT_RXAGG = BIT(2), + IWL_ENABLE_HT_TXAGG = BIT(3), +}; /** * struct iwl_mod_params @@ -90,7 +93,7 @@ enum iwl_power_level { * * @sw_crypto: using hardware encryption, default = 0 * @disable_11n: disable 11n capabilities, default = 0, - * use IWL_DISABLE_HT_* constants + * use IWL_[DIS,EN]ABLE_HT_* constants * @amsdu_size_8K: enable 8K amsdu size, default = 0 * @restart_fw: restart firmware, default = 1 * @wd_disable: enable stuck queue check, default = 0 diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 6bf9766e5982..c35b8661b395 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -328,6 +328,24 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, ieee80211_free_txskb(hw, skb); } +static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg) +{ + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) + return false; + return true; +} + +static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg) +{ + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) + return false; + if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG) + return true; + + /* enabled by default */ + return true; +} + static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, @@ -347,7 +365,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, switch (action) { case IEEE80211_AMPDU_RX_START: - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) { + if (!iwl_enable_rx_ampdu(mvm->cfg)) { ret = -EINVAL; break; } @@ -357,7 +375,7 @@ static int iwl_mvm_mac_ampdu_action(struct 
ieee80211_hw *hw, ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false); break; case IEEE80211_AMPDU_TX_START: - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) { + if (!iwl_enable_tx_ampdu(mvm->cfg)) { ret = -EINVAL; break; } diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index 4d79761b9c87..9d3d2758ec35 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -748,7 +748,7 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev) static u16 mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv) + void *accel_priv, select_queue_fallback_t fallback) { skb->priority = cfg80211_classify8021d(skb, NULL); return mwifiex_1d_to_wmm_queue[skb->priority]; diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h index 56aee067f324..a6ad79f61bf9 100644 --- a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h +++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h @@ -15,6 +15,8 @@ #ifndef RTL8187_H #define RTL8187_H +#include <linux/cache.h> + #include "rtl818x.h" #include "leds.h" @@ -139,7 +141,10 @@ struct rtl8187_priv { u8 aifsn[4]; u8 rfkill_mask; struct { - __le64 buf; + union { + __le64 buf; + u8 dummy1[L1_CACHE_BYTES]; + } ____cacheline_aligned; struct sk_buff_head queue; } b_tx_status; /* This queue is used by both -b and non-b devices */ struct mutex io_mutex; @@ -147,7 +152,8 @@ struct rtl8187_priv { u8 bits8; __le16 bits16; __le32 bits32; - } *io_dmabuf; + u8 dummy2[L1_CACHE_BYTES]; + } *io_dmabuf ____cacheline_aligned; bool rfkill_off; u16 seqno; }; diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c index deedae3c5449..d1c0191a195b 100644 --- a/drivers/net/wireless/rtlwifi/ps.c +++ b/drivers/net/wireless/rtlwifi/ps.c @@ -48,7 +48,7 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw) /*<2> Enable Adapter */ if (rtlpriv->cfg->ops->hw_init(hw)) - return 1; + return false; RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); /*<3> Enable Interrupt */ diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c index a82b30a1996c..2eb0b38384dd 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c @@ -937,14 +937,26 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw) bool is92c; int err; u8 tmp_u1b; + unsigned long flags; rtlpci->being_init_adapter = true; + + /* Since this function can take a very long time (up to 350 ms) + * and can be called with irqs disabled, reenable the irqs + * to let the other devices continue being serviced. + * + * It is safe doing so since our own interrupts will only be enabled + * in a subsequent step. + */ + local_save_flags(flags); + local_irq_enable(); + rtlpriv->intf_ops->disable_aspm(hw); rtstatus = _rtl92ce_init_mac(hw); if (!rtstatus) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); err = 1; - return err; + goto exit; } err = rtl92c_download_fw(hw); @@ -952,7 +964,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw) RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "Failed to download FW. 
Init HW without FW now..\n"); err = 1; - return err; + goto exit; } rtlhal->last_hmeboxnum = 0; @@ -1032,6 +1044,8 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw) RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n"); } rtl92c_dm_init(hw); +exit: + local_irq_restore(flags); rtlpci->being_init_adapter = false; return err; } diff --git a/drivers/of/address.c b/drivers/of/address.c index d3dd41c840f1..1a54f1ffaadb 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c @@ -99,11 +99,12 @@ static unsigned int of_bus_default_get_flags(const __be32 *addr) static int of_bus_pci_match(struct device_node *np) { /* + * "pciex" is PCI Express * "vci" is for the /chaos bridge on 1st-gen PCI powermacs * "ht" is hypertransport */ - return !strcmp(np->type, "pci") || !strcmp(np->type, "vci") || - !strcmp(np->type, "ht"); + return !strcmp(np->type, "pci") || !strcmp(np->type, "pciex") || + !strcmp(np->type, "vci") || !strcmp(np->type, "ht"); } static void of_bus_pci_count_cells(struct device_node *np, diff --git a/drivers/of/base.c b/drivers/of/base.c index ff85450d5683..10b51106c854 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -730,46 +730,64 @@ out: } EXPORT_SYMBOL(of_find_node_with_property); -static -const struct of_device_id *__of_match_node(const struct of_device_id *matches, - const struct device_node *node) +static const struct of_device_id * +of_match_compatible(const struct of_device_id *matches, + const struct device_node *node) { const char *cp; int cplen, l; - - if (!matches) - return NULL; + const struct of_device_id *m; cp = __of_get_property(node, "compatible", &cplen); - do { - const struct of_device_id *m = matches; - - /* Check against matches with current compatible string */ + while (cp && (cplen > 0)) { + m = matches; while (m->name[0] || m->type[0] || m->compatible[0]) { - int match = 1; - if (m->name[0]) - match &= node->name - && !strcmp(m->name, node->name); - if (m->type[0]) - match &= node->type - && !strcmp(m->type, node->type); - if (m->compatible[0]) - match &= cp - && !of_compat_cmp(m->compatible, cp, - strlen(m->compatible)); - if (match) + /* Only match for the entries without type and name */ + if (m->name[0] || m->type[0] || + of_compat_cmp(m->compatible, cp, + strlen(m->compatible))) + m++; + else return m; - m++; } - /* Get node's next compatible string */ - if (cp) { - l = strlen(cp) + 1; - cp += l; - cplen -= l; - } - } while (cp && (cplen > 0)); + /* Get node's next compatible string */ + l = strlen(cp) + 1; + cp += l; + cplen -= l; + } + + return NULL; +} + +static +const struct of_device_id *__of_match_node(const struct of_device_id *matches, + const struct device_node *node) +{ + const struct of_device_id *m; + if (!matches) + return NULL; + + m = of_match_compatible(matches, node); + if (m) + return m; + + while (matches->name[0] || matches->type[0] || matches->compatible[0]) { + int match = 1; + if (matches->name[0]) + match &= node->name + && !strcmp(matches->name, node->name); + if (matches->type[0]) + match &= node->type + && !strcmp(matches->type, node->type); + if (matches->compatible[0]) + match &= __of_device_is_compatible(node, + matches->compatible); + if (match) + return matches; + matches++; + } return NULL; } @@ -778,10 +796,12 @@ const struct of_device_id *__of_match_node(const struct of_device_id *matches, * @matches: array of of device match structures to search in * @node: the of device structure to match against * - * Low level utility function used by device matching. 
Matching order - * is to compare each of the node's compatibles with all given matches - * first. This implies node's compatible is sorted from specific to - * generic while matches can be in any order. + * Low level utility function used by device matching. We have two ways + * of matching: + * - Try to find the best compatible match by comparing each compatible + * string of device node with all the given matches respectively. + * - If the above method failed, then try to match the compatible by using + * __of_device_is_compatible() besides the match in type and name. */ const struct of_device_id *of_match_node(const struct of_device_id *matches, const struct device_node *node) diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 875b7b6f0d2a..5b3c24f3cde5 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -24,7 +24,11 @@ MODULE_LICENSE("GPL"); static void of_set_phy_supported(struct phy_device *phydev, u32 max_speed) { - phydev->supported |= PHY_DEFAULT_FEATURES; + /* The default values for phydev->supported are provided by the PHY + * driver "features" member, we want to reset to sane defaults fist + * before supporting higher speeds. + */ + phydev->supported &= PHY_DEFAULT_FEATURES; switch (max_speed) { default: @@ -44,7 +48,7 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi { struct phy_device *phy; bool is_c45; - int rc, prev_irq; + int rc; u32 max_speed = 0; is_c45 = of_device_is_compatible(child, @@ -54,12 +58,14 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi if (!phy || IS_ERR(phy)) return 1; - if (mdio->irq) { - prev_irq = mdio->irq[addr]; - mdio->irq[addr] = - irq_of_parse_and_map(child, 0); - if (!mdio->irq[addr]) - mdio->irq[addr] = prev_irq; + rc = irq_of_parse_and_map(child, 0); + if (rc > 0) { + phy->irq = rc; + if (mdio->irq) + mdio->irq[addr] = rc; + } else { + if (mdio->irq) + phy->irq = mdio->irq[addr]; } /* Associate the OF node with the device structure so it diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index e2a783fdb98f..7c7a388c85ab 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -730,6 +730,17 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot) return (unsigned int)sta; } +static inline bool device_status_valid(unsigned int sta) +{ + /* + * ACPI spec says that _STA may return bit 0 clear with bit 3 set + * if the device is valid but does not require a device driver to be + * loaded (Section 6.3.7 of ACPI 5.0A). + */ + unsigned int mask = ACPI_STA_DEVICE_ENABLED | ACPI_STA_DEVICE_FUNCTIONING; + return (sta & mask) == mask; +} + /** * trim_stale_devices - remove PCI devices that are not responding. * @dev: PCI device to start walking the hierarchy from. 
@@ -745,7 +756,7 @@ static void trim_stale_devices(struct pci_dev *dev) unsigned long long sta; status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); - alive = (ACPI_SUCCESS(status) && sta == ACPI_STA_ALL) + alive = (ACPI_SUCCESS(status) && device_status_valid(sta)) || acpiphp_no_hotplug(handle); } if (!alive) { @@ -792,7 +803,7 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge) mutex_lock(&slot->crit_sect); if (slot_no_hotplug(slot)) { ; /* do nothing */ - } else if (get_slot_status(slot) == ACPI_STA_ALL) { + } else if (device_status_valid(get_slot_status(slot))) { /* remove stale devices if any */ list_for_each_entry_safe_reverse(dev, tmp, &bus->devices, bus_list) diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index 645c867c1257..5f5b0f4be5be 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c @@ -162,6 +162,9 @@ int phy_init(struct phy *phy) { int ret; + if (!phy) + return 0; + ret = phy_pm_runtime_get_sync(phy); if (ret < 0 && ret != -ENOTSUPP) return ret; @@ -187,6 +190,9 @@ int phy_exit(struct phy *phy) { int ret; + if (!phy) + return 0; + ret = phy_pm_runtime_get_sync(phy); if (ret < 0 && ret != -ENOTSUPP) return ret; @@ -212,6 +218,9 @@ int phy_power_on(struct phy *phy) { int ret; + if (!phy) + return 0; + ret = phy_pm_runtime_get_sync(phy); if (ret < 0 && ret != -ENOTSUPP) return ret; @@ -240,6 +249,9 @@ int phy_power_off(struct phy *phy) { int ret; + if (!phy) + return 0; + mutex_lock(&phy->mutex); if (phy->power_count == 1 && phy->ops->power_off) { ret = phy->ops->power_off(phy); @@ -308,7 +320,7 @@ err0: */ void phy_put(struct phy *phy) { - if (IS_ERR(phy)) + if (!phy || IS_ERR(phy)) return; module_put(phy->ops->owner); @@ -328,6 +340,9 @@ void devm_phy_put(struct device *dev, struct phy *phy) { int r; + if (!phy) + return; + r = devres_destroy(dev, devm_phy_release, devm_phy_match, phy); dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n"); } @@ -411,6 +426,27 @@ struct phy *phy_get(struct device *dev, const char *string) EXPORT_SYMBOL_GPL(phy_get); /** + * phy_optional_get() - lookup and obtain a reference to an optional phy. + * @dev: device that requests this phy + * @string: the phy name as given in the dt data or the name of the controller + * port for non-dt case + * + * Returns the phy driver, after getting a refcount to it; or + * NULL if there is no such phy. The caller is responsible for + * calling phy_put() to release that count. + */ +struct phy *phy_optional_get(struct device *dev, const char *string) +{ + struct phy *phy = phy_get(dev, string); + + if (PTR_ERR(phy) == -ENODEV) + phy = NULL; + + return phy; +} +EXPORT_SYMBOL_GPL(phy_optional_get); + +/** * devm_phy_get() - lookup and obtain a reference to a phy. * @dev: device that requests this phy * @string: the phy name as given in the dt data or phy device name @@ -441,6 +477,30 @@ struct phy *devm_phy_get(struct device *dev, const char *string) EXPORT_SYMBOL_GPL(devm_phy_get); /** + * devm_phy_optional_get() - lookup and obtain a reference to an optional phy. + * @dev: device that requests this phy + * @string: the phy name as given in the dt data or phy device name + * for non-dt case + * + * Gets the phy using phy_get(), and associates a device with it using + * devres. On driver detach, release function is invoked on the devres + * data, then, devres data is freed. This differs to devm_phy_get() in + * that if the phy does not exist, it is not considered an error and + * -ENODEV will not be returned. 
Instead the NULL phy is returned, + * which can be passed to all other phy consumer calls. + */ +struct phy *devm_phy_optional_get(struct device *dev, const char *string) +{ + struct phy *phy = devm_phy_get(dev, string); + + if (PTR_ERR(phy) == -ENODEV) + phy = NULL; + + return phy; +} +EXPORT_SYMBOL_GPL(devm_phy_optional_get); + +/** * phy_create() - create a new phy * @dev: device that is creating the new phy * @ops: function pointers for performing phy operations diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c index 563174891c90..041f9b638d28 100644 --- a/drivers/power/ds2782_battery.c +++ b/drivers/power/ds2782_battery.c @@ -192,7 +192,7 @@ static int ds2786_get_voltage(struct ds278x_info *info, int *voltage_uV) /* * Voltage is measured in units of 1.22mV. The voltage is stored as - * a 10-bit number plus sign, in the upper bits of a 16-bit register + * a 12-bit number plus sign, in the upper bits of a 16-bit register */ err = ds278x_read_reg16(info, DS278x_REG_VOLT_MSB, &raw); if (err) diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c index 80edb7d8cb54..0b4cf9d63291 100644 --- a/drivers/power/isp1704_charger.c +++ b/drivers/power/isp1704_charger.c @@ -444,8 +444,6 @@ static int isp1704_charger_probe(struct platform_device *pdev) ret = PTR_ERR(isp->phy); goto fail0; } - if (!isp->phy) - goto fail0; isp->dev = &pdev->dev; platform_set_drvdata(pdev, isp); diff --git a/drivers/power/max17040_battery.c b/drivers/power/max17040_battery.c index c7ff6d67f158..0fbac861080d 100644 --- a/drivers/power/max17040_battery.c +++ b/drivers/power/max17040_battery.c @@ -148,7 +148,7 @@ static void max17040_get_online(struct i2c_client *client) { struct max17040_chip *chip = i2c_get_clientdata(client); - if (chip->pdata->battery_online) + if (chip->pdata && chip->pdata->battery_online) chip->online = chip->pdata->battery_online(); else chip->online = 1; @@ -158,7 +158,8 @@ static void max17040_get_status(struct i2c_client *client) { struct max17040_chip *chip = i2c_get_clientdata(client); - if (!chip->pdata->charger_online || !chip->pdata->charger_enable) { + if (!chip->pdata || !chip->pdata->charger_online + || !chip->pdata->charger_enable) { chip->status = POWER_SUPPLY_STATUS_UNKNOWN; return; } diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c index 7f340206d329..b14ebdad5dd2 100644 --- a/drivers/regulator/da9055-regulator.c +++ b/drivers/regulator/da9055-regulator.c @@ -576,7 +576,9 @@ static int da9055_regulator_probe(struct platform_device *pdev) /* Only LDO 5 and 6 has got the over current interrupt */ if (pdev->id == DA9055_ID_LDO5 || pdev->id == DA9055_ID_LDO6) { irq = platform_get_irq_byname(pdev, "REGULATOR"); - irq = regmap_irq_get_virq(da9055->irq_data, irq); + if (irq < 0) + return irq; + ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, da9055_ldo5_6_oc_irq, IRQF_TRIGGER_HIGH | diff --git a/drivers/regulator/max14577.c b/drivers/regulator/max14577.c index b1078ba3f393..186df8785a91 100644 --- a/drivers/regulator/max14577.c +++ b/drivers/regulator/max14577.c @@ -168,10 +168,11 @@ static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev) MAX14577_REG_MAX); if (ret < 0) { dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret); - return ret; } - return 0; + of_node_put(np); + + return ret; } static inline struct regulator_init_data *match_init_data(int index) diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 
9e80d61e5a3a..2eb97d7e8d12 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -2595,8 +2595,6 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, return -ENOMEM; } - INIT_LIST_HEAD(&cmd->cmd_list); - memcpy(&cmd->atio, atio, sizeof(*atio)); cmd->state = QLA_TGT_STATE_NEW; cmd->tgt = vha->vha_tgt.qla_tgt; diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 1d10eecad499..66e755cdde57 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -855,7 +855,6 @@ struct qla_tgt_cmd { uint16_t loop_id; /* to save extra sess dereferences */ struct qla_tgt *tgt; /* to save extra sess dereferences */ struct scsi_qla_host *vha; - struct list_head cmd_list; struct atio_from_isp atio; }; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 7bd7f0d5f050..62ec84b42e31 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1684,7 +1684,7 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) host_dev = scsi_get_device(shost); if (host_dev && host_dev->dma_mask) - bounce_limit = dma_max_pfn(host_dev) << PAGE_SHIFT; + bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT; return bounce_limit; } diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index ba9310bc9acb..581ee2a8856b 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -376,10 +376,10 @@ config SPI_PXA2XX_PCI def_tristate SPI_PXA2XX && PCI config SPI_RSPI - tristate "Renesas RSPI controller" + tristate "Renesas RSPI/QSPI controller" depends on (SUPERH && SH_DMAE_BASE) || ARCH_SHMOBILE help - SPI driver for Renesas RSPI blocks. + SPI driver for Renesas RSPI and QSPI blocks. config SPI_S3C24XX tristate "Samsung S3C24XX series SPI" diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c index 50406306bc20..bae97ffec4b9 100644 --- a/drivers/spi/spi-nuc900.c +++ b/drivers/spi/spi-nuc900.c @@ -361,6 +361,8 @@ static int nuc900_spi_probe(struct platform_device *pdev) init_completion(&hw->done); master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + if (hw->pdata->lsb) + master->mode_bits |= SPI_LSB_FIRST; master->num_chipselect = hw->pdata->num_cs; master->bus_num = hw->pdata->bus_num; hw->bitbang.master = hw->master; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 23756b0f9036..d0b28bba38be 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -755,9 +755,7 @@ static void spi_pump_messages(struct kthread_work *work) ret = master->transfer_one_message(master, master->cur_msg); if (ret) { dev_err(&master->dev, - "failed to transfer one message from queue: %d\n", ret); - master->cur_msg->status = ret; - spi_finalize_current_message(master); + "failed to transfer one message from queue\n"); return; } } diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index 23948f167012..713a97226787 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -295,21 +295,29 @@ static ssize_t ashmem_read(struct file *file, char __user *buf, /* If size is not set, or set to 0, always return EOF. */ if (asma->size == 0) - goto out; + goto out_unlock; if (!asma->file) { ret = -EBADF; - goto out; + goto out_unlock; } - ret = asma->file->f_op->read(asma->file, buf, len, pos); - if (ret < 0) - goto out; + mutex_unlock(&ashmem_mutex); - /** Update backing file pos, since f_ops->read() doesn't */ - asma->file->f_pos = *pos; + /* + * asma and asma->file are used outside the lock here. 
We assume + * once asma->file is set it will never be changed, and will not + * be destroyed until all references to the file are dropped and + * ashmem_release is called. + */ + ret = asma->file->f_op->read(asma->file, buf, len, pos); + if (ret >= 0) { + /** Update backing file pos, since f_ops->read() doesn't */ + asma->file->f_pos = *pos; + } + return ret; -out: +out_unlock: mutex_unlock(&ashmem_mutex); return ret; } @@ -498,6 +506,7 @@ out: static int set_name(struct ashmem_area *asma, void __user *name) { + int len; int ret = 0; char local_name[ASHMEM_NAME_LEN]; @@ -510,21 +519,19 @@ static int set_name(struct ashmem_area *asma, void __user *name) * variable that does not need protection and later copy the local * variable to the structure member with lock held. */ - if (copy_from_user(local_name, name, ASHMEM_NAME_LEN)) - return -EFAULT; - + len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN); + if (len < 0) + return len; + if (len == ASHMEM_NAME_LEN) + local_name[ASHMEM_NAME_LEN - 1] = '\0'; mutex_lock(&ashmem_mutex); /* cannot change an existing mapping's name */ - if (unlikely(asma->file)) { + if (unlikely(asma->file)) ret = -EINVAL; - goto out; - } - memcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, - local_name, ASHMEM_NAME_LEN); - asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0'; -out: - mutex_unlock(&ashmem_mutex); + else + strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name); + mutex_unlock(&ashmem_mutex); return ret; } diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c index af6cd370b30f..ee3a7380e53b 100644 --- a/drivers/staging/android/ion/compat_ion.c +++ b/drivers/staging/android/ion/compat_ion.c @@ -35,9 +35,14 @@ struct compat_ion_custom_data { compat_ulong_t arg; }; +struct compat_ion_handle_data { + compat_int_t handle; +}; + #define COMPAT_ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \ struct compat_ion_allocation_data) -#define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data) +#define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, \ + struct compat_ion_handle_data) #define COMPAT_ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, \ struct compat_ion_custom_data) @@ -64,6 +69,19 @@ static int compat_get_ion_allocation_data( return err; } +static int compat_get_ion_handle_data( + struct compat_ion_handle_data __user *data32, + struct ion_handle_data __user *data) +{ + compat_int_t i; + int err; + + err = get_user(i, &data32->handle); + err |= put_user(i, &data->handle); + + return err; +} + static int compat_put_ion_allocation_data( struct compat_ion_allocation_data __user *data32, struct ion_allocation_data __user *data) @@ -132,8 +150,8 @@ long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) } case COMPAT_ION_IOC_FREE: { - struct compat_ion_allocation_data __user *data32; - struct ion_allocation_data __user *data; + struct compat_ion_handle_data __user *data32; + struct ion_handle_data __user *data; int err; data32 = compat_ptr(arg); @@ -141,7 +159,7 @@ long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (data == NULL) return -EFAULT; - err = compat_get_ion_allocation_data(data32, data); + err = compat_get_ion_handle_data(data32, data); if (err) return err; diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c index 55b2002753f2..01cdc8aee898 100644 --- a/drivers/staging/android/ion/ion_dummy_driver.c +++ b/drivers/staging/android/ion/ion_dummy_driver.c @@ -17,9 +17,11 @@ #include <linux/err.h> #include 
<linux/platform_device.h> #include <linux/slab.h> +#include <linux/init.h> #include <linux/bootmem.h> #include <linux/memblock.h> #include <linux/sizes.h> +#include <linux/io.h> #include "ion.h" #include "ion_priv.h" @@ -57,7 +59,7 @@ struct ion_platform_heap dummy_heaps[] = { }; struct ion_platform_data dummy_ion_pdata = { - .nr = 4, + .nr = ARRAY_SIZE(dummy_heaps), .heaps = dummy_heaps, }; @@ -69,7 +71,7 @@ static int __init ion_dummy_init(void) { heaps = kzalloc(sizeof(struct ion_heap *) * dummy_ion_pdata.nr, GFP_KERNEL); if (!heaps) - return PTR_ERR(heaps); + return -ENOMEM; /* Allocate a dummy carveout heap */ @@ -128,6 +130,7 @@ err: } return err; } +device_initcall(ion_dummy_init); static void __exit ion_dummy_exit(void) { @@ -152,7 +155,4 @@ static void __exit ion_dummy_exit(void) return; } - -module_init(ion_dummy_init); -module_exit(ion_dummy_exit); - +__exitcall(ion_dummy_exit); diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c index 296c74f98dc0..37e64d51394c 100644 --- a/drivers/staging/android/ion/ion_heap.c +++ b/drivers/staging/android/ion/ion_heap.c @@ -243,12 +243,12 @@ int ion_heap_init_deferred_free(struct ion_heap *heap) init_waitqueue_head(&heap->waitqueue); heap->task = kthread_run(ion_heap_deferred_free, heap, "%s", heap->name); - sched_setscheduler(heap->task, SCHED_IDLE, &param); if (IS_ERR(heap->task)) { pr_err("%s: creating thread for deferred free failed\n", __func__); return PTR_RET(heap->task); } + sched_setscheduler(heap->task, SCHED_IDLE, &param); return 0; } diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h index d98673981cc4..fc2e4fccf69d 100644 --- a/drivers/staging/android/ion/ion_priv.h +++ b/drivers/staging/android/ion/ion_priv.h @@ -17,6 +17,7 @@ #ifndef _ION_PRIV_H #define _ION_PRIV_H +#include <linux/device.h> #include <linux/dma-direction.h> #include <linux/kref.h> #include <linux/mm_types.h> diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c index 7f0729130d65..9849f3963e75 100644 --- a/drivers/staging/android/ion/ion_system_heap.c +++ b/drivers/staging/android/ion/ion_system_heap.c @@ -124,6 +124,7 @@ static struct page_info *alloc_largest_available(struct ion_system_heap *heap, info->page = page; info->order = orders[i]; + INIT_LIST_HEAD(&info->list); return info; } kfree(info); @@ -145,12 +146,15 @@ static int ion_system_heap_allocate(struct ion_heap *heap, struct list_head pages; struct page_info *info, *tmp_info; int i = 0; - long size_remaining = PAGE_ALIGN(size); + unsigned long size_remaining = PAGE_ALIGN(size); unsigned int max_order = orders[0]; if (align > PAGE_SIZE) return -EINVAL; + if (size / PAGE_SIZE > totalram_pages / 2) + return -ENOMEM; + INIT_LIST_HEAD(&pages); while (size_remaining > 0) { info = alloc_largest_available(sys_heap, buffer, size_remaining, diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h index 585040be5f18..5aaf71d6974b 100644 --- a/drivers/staging/android/sw_sync.h +++ b/drivers/staging/android/sw_sync.h @@ -35,10 +35,27 @@ struct sw_sync_pt { u32 value; }; +#if IS_ENABLED(CONFIG_SW_SYNC) struct sw_sync_timeline *sw_sync_timeline_create(const char *name); void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc); struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value); +#else +static inline struct sw_sync_timeline *sw_sync_timeline_create(const char *name) +{ + return NULL; +} + +static inline void 
sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc) +{ +} + +static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, + u32 value) +{ + return NULL; +} +#endif /* IS_ENABLED(CONFIG_SW_SYNC) */ #endif /* __KERNEL __ */ diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c index 38e5d3b5ed9b..3d05f662110b 100644 --- a/drivers/staging/android/sync.c +++ b/drivers/staging/android/sync.c @@ -79,27 +79,27 @@ static void sync_timeline_free(struct kref *kref) container_of(kref, struct sync_timeline, kref); unsigned long flags; - if (obj->ops->release_obj) - obj->ops->release_obj(obj); - spin_lock_irqsave(&sync_timeline_list_lock, flags); list_del(&obj->sync_timeline_list); spin_unlock_irqrestore(&sync_timeline_list_lock, flags); + if (obj->ops->release_obj) + obj->ops->release_obj(obj); + kfree(obj); } void sync_timeline_destroy(struct sync_timeline *obj) { obj->destroyed = true; + smp_wmb(); /* - * If this is not the last reference, signal any children - * that their parent is going away. + * signal any children that their parent is going away. */ + sync_timeline_signal(obj); - if (!kref_put(&obj->kref, sync_timeline_free)) - sync_timeline_signal(obj); + kref_put(&obj->kref, sync_timeline_free); } EXPORT_SYMBOL(sync_timeline_destroy); diff --git a/drivers/staging/bcm/Bcmnet.c b/drivers/staging/bcm/Bcmnet.c index 8dfdd2732bdc..95a2358267ba 100644 --- a/drivers/staging/bcm/Bcmnet.c +++ b/drivers/staging/bcm/Bcmnet.c @@ -40,7 +40,7 @@ static INT bcm_close(struct net_device *dev) } static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv) + void *accel_priv, select_queue_fallback_t fallback) { return ClassifyPacket(netdev_priv(dev), skb); } diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c index 246080316c90..5b15033a94bf 100644 --- a/drivers/staging/comedi/drivers.c +++ b/drivers/staging/comedi/drivers.c @@ -616,8 +616,6 @@ int comedi_auto_config(struct device *hardware_device, ret = driver->auto_attach(dev, context); if (ret >= 0) ret = comedi_device_postconfig(dev); - if (ret < 0) - comedi_device_detach(dev); mutex_unlock(&dev->mutex); if (ret < 0) { diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c index 593676cf706a..d9ad2c0fdda2 100644 --- a/drivers/staging/comedi/drivers/adv_pci1710.c +++ b/drivers/staging/comedi/drivers/adv_pci1710.c @@ -494,6 +494,7 @@ static int pci171x_insn_write_ao(struct comedi_device *dev, struct comedi_insn *insn, unsigned int *data) { struct pci1710_private *devpriv = dev->private; + unsigned int val; int n, chan, range, ofs; chan = CR_CHAN(insn->chanspec); @@ -509,11 +510,14 @@ static int pci171x_insn_write_ao(struct comedi_device *dev, outw(devpriv->da_ranges, dev->iobase + PCI171x_DAREF); ofs = PCI171x_DA1; } + val = devpriv->ao_data[chan]; - for (n = 0; n < insn->n; n++) - outw(data[n], dev->iobase + ofs); + for (n = 0; n < insn->n; n++) { + val = data[n]; + outw(val, dev->iobase + ofs); + } - devpriv->ao_data[chan] = data[n]; + devpriv->ao_data[chan] = val; return n; @@ -679,6 +683,7 @@ static int pci1720_insn_write_ao(struct comedi_device *dev, struct comedi_insn *insn, unsigned int *data) { struct pci1710_private *devpriv = dev->private; + unsigned int val; int n, rangereg, chan; chan = CR_CHAN(insn->chanspec); @@ -688,13 +693,15 @@ static int pci1720_insn_write_ao(struct comedi_device *dev, outb(rangereg, dev->iobase + PCI1720_RANGE); devpriv->da_ranges = rangereg; } + val = 
devpriv->ao_data[chan]; for (n = 0; n < insn->n; n++) { - outw(data[n], dev->iobase + PCI1720_DA0 + (chan << 1)); + val = data[n]; + outw(val, dev->iobase + PCI1720_DA0 + (chan << 1)); outb(0, dev->iobase + PCI1720_SYNCOUT); /* update outputs */ } - devpriv->ao_data[chan] = data[n]; + devpriv->ao_data[chan] = val; return n; } diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c index 3beeb1254152..88c60b6020c4 100644 --- a/drivers/staging/comedi/drivers/usbduxsigma.c +++ b/drivers/staging/comedi/drivers/usbduxsigma.c @@ -48,6 +48,7 @@ #include <linux/usb.h> #include <linux/fcntl.h> #include <linux/compiler.h> +#include <asm/unaligned.h> #include "comedi_fc.h" #include "../comedidev.h" @@ -792,7 +793,8 @@ static int usbduxsigma_ai_insn_read(struct comedi_device *dev, } /* 32 bits big endian from the A/D converter */ - val = be32_to_cpu(*((uint32_t *)((devpriv->insn_buf) + 1))); + val = be32_to_cpu(get_unaligned((uint32_t + *)(devpriv->insn_buf + 1))); val &= 0x00ffffff; /* strip status byte */ val ^= 0x00800000; /* convert to unsigned */ @@ -1357,7 +1359,7 @@ static int usbduxsigma_getstatusinfo(struct comedi_device *dev, int chan) return ret; /* 32 bits big endian from the A/D converter */ - val = be32_to_cpu(*((uint32_t *)((devpriv->insn_buf)+1))); + val = be32_to_cpu(get_unaligned((uint32_t *)(devpriv->insn_buf + 1))); val &= 0x00ffffff; /* strip status byte */ val ^= 0x00800000; /* convert to unsigned */ diff --git a/drivers/staging/dgrp/dgrp_net_ops.c b/drivers/staging/dgrp/dgrp_net_ops.c index 1f61b89eca44..33ac7fb88cbd 100644 --- a/drivers/staging/dgrp/dgrp_net_ops.c +++ b/drivers/staging/dgrp/dgrp_net_ops.c @@ -2232,177 +2232,6 @@ done: return rtn; } -/* - * Common Packet Handling code - */ - -static void handle_data_in_packet(struct nd_struct *nd, struct ch_struct *ch, - long dlen, long plen, int n1, u8 *dbuf) -{ - char *error; - long n; - long remain; - u8 *buf; - u8 *b; - - remain = nd->nd_remain; - nd->nd_tx_work = 1; - - /* - * Otherwise data should appear only when we are - * in the CS_READY state. - */ - - if (ch->ch_state < CS_READY) { - error = "Data received before RWIN established"; - nd->nd_remain = 0; - nd->nd_state = NS_SEND_ERROR; - nd->nd_error = error; - } - - /* - * Assure that the data received is within the - * allowable window. - */ - - n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff; - - if (dlen > n) { - error = "Receive data overrun"; - nd->nd_remain = 0; - nd->nd_state = NS_SEND_ERROR; - nd->nd_error = error; - } - - /* - * If we received 3 or less characters, - * assume it is a human typing, and set RTIME - * to 10 milliseconds. - * - * If we receive 10 or more characters, - * assume its not a human typing, and set RTIME - * to 100 milliseconds. - */ - - if (ch->ch_edelay != DGRP_RTIME) { - if (ch->ch_rtime != ch->ch_edelay) { - ch->ch_rtime = ch->ch_edelay; - ch->ch_flag |= CH_PARAM; - } - } else if (dlen <= 3) { - if (ch->ch_rtime != 10) { - ch->ch_rtime = 10; - ch->ch_flag |= CH_PARAM; - } - } else { - if (ch->ch_rtime != DGRP_RTIME) { - ch->ch_rtime = DGRP_RTIME; - ch->ch_flag |= CH_PARAM; - } - } - - /* - * If a portion of the packet is outside the - * buffer, shorten the effective length of the - * data packet to be the amount of data received. - */ - - if (remain < plen) - dlen -= plen - remain; - - /* - * Detect if receive flush is now complete. 
- */ - - if ((ch->ch_flag & CH_RX_FLUSH) != 0 && - ((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >= - ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) { - ch->ch_flag &= ~CH_RX_FLUSH; - } - - /* - * If we are ready to receive, move the data into - * the receive buffer. - */ - - ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff; - - if (ch->ch_state == CS_READY && - (ch->ch_tun.un_open_count != 0) && - (ch->ch_tun.un_flag & UN_CLOSING) == 0 && - (ch->ch_cflag & CF_CREAD) != 0 && - (ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 && - (ch->ch_send & RR_RX_FLUSH) == 0) { - - if (ch->ch_rin + dlen >= RBUF_MAX) { - n = RBUF_MAX - ch->ch_rin; - - memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n); - - ch->ch_rin = 0; - dbuf += n; - dlen -= n; - } - - memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen); - - ch->ch_rin += dlen; - - - /* - * If we are not in fastcook mode, or - * if there is a fastcook thread - * waiting for data, send the data to - * the line discipline. - */ - - if ((ch->ch_flag & CH_FAST_READ) == 0 || - ch->ch_inwait != 0) { - dgrp_input(ch); - } - - /* - * If there is a read thread waiting - * in select, and we are in fastcook - * mode, wake him up. - */ - - if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) && - (ch->ch_flag & CH_FAST_READ) != 0) - wake_up_interruptible(&ch->ch_tun.un_tty->read_wait); - - /* - * Wake any thread waiting in the - * fastcook loop. - */ - - if ((ch->ch_flag & CH_INPUT) != 0) { - ch->ch_flag &= ~CH_INPUT; - wake_up_interruptible(&ch->ch_flag_wait); - } - } - - /* - * Fabricate and insert a data packet header to - * preced the remaining data when it comes in. - */ - - if (remain < plen) { - dlen = plen - remain; - b = buf; - - b[0] = 0x90 + n1; - put_unaligned_be16(dlen, b + 1); - - remain = 3; - if (remain > 0 && b != buf) - memcpy(buf, b, remain); - - nd->nd_remain = remain; - return; - } -} - /** * dgrp_receive() -- decode data packets received from the remote PortServer. * @nd: pointer to a node structure @@ -2477,8 +2306,7 @@ static void dgrp_receive(struct nd_struct *nd) plen = dlen + 1; dbuf = b + 1; - handle_data_in_packet(nd, ch, dlen, plen, n1, dbuf); - break; + goto data; /* * Process 2-byte header data packet. @@ -2492,8 +2320,7 @@ static void dgrp_receive(struct nd_struct *nd) plen = dlen + 2; dbuf = b + 2; - handle_data_in_packet(nd, ch, dlen, plen, n1, dbuf); - break; + goto data; /* * Process 3-byte header data packet. @@ -2508,6 +2335,159 @@ static void dgrp_receive(struct nd_struct *nd) dbuf = b + 3; + /* + * Common packet handling code. + */ + +data: + nd->nd_tx_work = 1; + + /* + * Otherwise data should appear only when we are + * in the CS_READY state. + */ + + if (ch->ch_state < CS_READY) { + error = "Data received before RWIN established"; + goto prot_error; + } + + /* + * Assure that the data received is within the + * allowable window. + */ + + n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff; + + if (dlen > n) { + error = "Receive data overrun"; + goto prot_error; + } + + /* + * If we received 3 or less characters, + * assume it is a human typing, and set RTIME + * to 10 milliseconds. + * + * If we receive 10 or more characters, + * assume its not a human typing, and set RTIME + * to 100 milliseconds. 
+ */ + + if (ch->ch_edelay != DGRP_RTIME) { + if (ch->ch_rtime != ch->ch_edelay) { + ch->ch_rtime = ch->ch_edelay; + ch->ch_flag |= CH_PARAM; + } + } else if (dlen <= 3) { + if (ch->ch_rtime != 10) { + ch->ch_rtime = 10; + ch->ch_flag |= CH_PARAM; + } + } else { + if (ch->ch_rtime != DGRP_RTIME) { + ch->ch_rtime = DGRP_RTIME; + ch->ch_flag |= CH_PARAM; + } + } + + /* + * If a portion of the packet is outside the + * buffer, shorten the effective length of the + * data packet to be the amount of data received. + */ + + if (remain < plen) + dlen -= plen - remain; + + /* + * Detect if receive flush is now complete. + */ + + if ((ch->ch_flag & CH_RX_FLUSH) != 0 && + ((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >= + ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) { + ch->ch_flag &= ~CH_RX_FLUSH; + } + + /* + * If we are ready to receive, move the data into + * the receive buffer. + */ + + ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff; + + if (ch->ch_state == CS_READY && + (ch->ch_tun.un_open_count != 0) && + (ch->ch_tun.un_flag & UN_CLOSING) == 0 && + (ch->ch_cflag & CF_CREAD) != 0 && + (ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 && + (ch->ch_send & RR_RX_FLUSH) == 0) { + + if (ch->ch_rin + dlen >= RBUF_MAX) { + n = RBUF_MAX - ch->ch_rin; + + memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n); + + ch->ch_rin = 0; + dbuf += n; + dlen -= n; + } + + memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen); + + ch->ch_rin += dlen; + + + /* + * If we are not in fastcook mode, or + * if there is a fastcook thread + * waiting for data, send the data to + * the line discipline. + */ + + if ((ch->ch_flag & CH_FAST_READ) == 0 || + ch->ch_inwait != 0) { + dgrp_input(ch); + } + + /* + * If there is a read thread waiting + * in select, and we are in fastcook + * mode, wake him up. + */ + + if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) && + (ch->ch_flag & CH_FAST_READ) != 0) + wake_up_interruptible(&ch->ch_tun.un_tty->read_wait); + + /* + * Wake any thread waiting in the + * fastcook loop. + */ + + if ((ch->ch_flag & CH_INPUT) != 0) { + ch->ch_flag &= ~CH_INPUT; + + wake_up_interruptible(&ch->ch_flag_wait); + } + } + + /* + * Fabricate and insert a data packet header to + * preced the remaining data when it comes in. 
+ */ + + if (remain < plen) { + dlen = plen - remain; + b = buf; + + b[0] = 0x90 + n1; + put_unaligned_be16(dlen, b + 1); + + remain = 3; + goto done; + } break; /* diff --git a/drivers/staging/gdm72xx/gdm_usb.c b/drivers/staging/gdm72xx/gdm_usb.c index f8788bf0a7d3..cdeffe75496b 100644 --- a/drivers/staging/gdm72xx/gdm_usb.c +++ b/drivers/staging/gdm72xx/gdm_usb.c @@ -635,11 +635,14 @@ static int gdm_usb_probe(struct usb_interface *intf, #endif /* CONFIG_WIMAX_GDM72XX_USB_PM */ ret = register_wimax_device(phy_dev, &intf->dev); + if (ret) + release_usb(udev); out: if (ret) { kfree(phy_dev); kfree(udev); + usb_put_dev(usbdev); } else { usb_set_intfdata(intf, phy_dev); } diff --git a/drivers/staging/iio/Documentation/iio_utils.h b/drivers/staging/iio/Documentation/iio_utils.h index 35154d60faf6..c9fedb79e3a2 100644 --- a/drivers/staging/iio/Documentation/iio_utils.h +++ b/drivers/staging/iio/Documentation/iio_utils.h @@ -77,7 +77,6 @@ struct iio_channel_info { uint64_t mask; unsigned be; unsigned is_signed; - unsigned enabled; unsigned location; }; @@ -335,6 +334,7 @@ inline int build_channel_array(const char *device_dir, while (ent = readdir(dp), ent != NULL) { if (strcmp(ent->d_name + strlen(ent->d_name) - strlen("_en"), "_en") == 0) { + int current_enabled = 0; current = &(*ci_array)[count++]; ret = asprintf(&filename, "%s/%s", scan_el_dir, ent->d_name); @@ -350,10 +350,10 @@ inline int build_channel_array(const char *device_dir, ret = -errno; goto error_cleanup_array; } - fscanf(sysfsfp, "%u", &current->enabled); + fscanf(sysfsfp, "%u", &current_enabled); fclose(sysfsfp); - if (!current->enabled) { + if (!current_enabled) { free(filename); count--; continue; diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c index 5ea36410f716..5708ffc62aec 100644 --- a/drivers/staging/iio/adc/ad799x_core.c +++ b/drivers/staging/iio/adc/ad799x_core.c @@ -393,7 +393,7 @@ static const struct iio_event_spec ad799x_events[] = { }, { .type = IIO_EV_TYPE_THRESH, .dir = IIO_EV_DIR_FALLING, - .mask_separate = BIT(IIO_EV_INFO_VALUE), + .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE), }, { .type = IIO_EV_TYPE_THRESH, @@ -409,7 +409,13 @@ static const struct iio_event_spec ad799x_events[] = { .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ .scan_index = (_index), \ - .scan_type = IIO_ST('u', _realbits, 16, 12 - (_realbits)), \ + .scan_type = { \ + .sign = 'u', \ + .realbits = (_realbits), \ + .storagebits = 16, \ + .shift = 12 - (_realbits), \ + .endianness = IIO_BE, \ + }, \ .event_spec = _ev_spec, \ .num_event_specs = _num_ev_spec, \ } @@ -588,7 +594,8 @@ static int ad799x_probe(struct i2c_client *client, return 0; error_free_irq: - free_irq(client->irq, indio_dev); + if (client->irq > 0) + free_irq(client->irq, indio_dev); error_cleanup_ring: ad799x_ring_cleanup(indio_dev); error_disable_reg: diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c index df71669bb60e..7fc66a6a6e36 100644 --- a/drivers/staging/iio/adc/mxs-lradc.c +++ b/drivers/staging/iio/adc/mxs-lradc.c @@ -1035,8 +1035,6 @@ SHOW_SCALE_AVAILABLE_ATTR(4); SHOW_SCALE_AVAILABLE_ATTR(5); SHOW_SCALE_AVAILABLE_ATTR(6); SHOW_SCALE_AVAILABLE_ATTR(7); -SHOW_SCALE_AVAILABLE_ATTR(8); -SHOW_SCALE_AVAILABLE_ATTR(9); SHOW_SCALE_AVAILABLE_ATTR(10); SHOW_SCALE_AVAILABLE_ATTR(11); SHOW_SCALE_AVAILABLE_ATTR(12); @@ -1053,8 +1051,6 @@ static struct attribute *mxs_lradc_attributes[] = {
&iio_dev_attr_in_voltage5_scale_available.dev_attr.attr, &iio_dev_attr_in_voltage6_scale_available.dev_attr.attr, &iio_dev_attr_in_voltage7_scale_available.dev_attr.attr, - &iio_dev_attr_in_voltage8_scale_available.dev_attr.attr, - &iio_dev_attr_in_voltage9_scale_available.dev_attr.attr, &iio_dev_attr_in_voltage10_scale_available.dev_attr.attr, &iio_dev_attr_in_voltage11_scale_available.dev_attr.attr, &iio_dev_attr_in_voltage12_scale_available.dev_attr.attr, @@ -1613,7 +1609,7 @@ static int mxs_lradc_probe(struct platform_device *pdev) * of the array. */ scale_uv = ((u64)lradc->vref_mv[i] * 100000000) >> - (iio->channels[i].scan_type.realbits - s); + (LRADC_RESOLUTION - s); lradc->scale_avail[i][s].nano = do_div(scale_uv, 100000000) * 10; lradc->scale_avail[i][s].integer = scale_uv; diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c index 0a4298b744e6..2b96665da8a2 100644 --- a/drivers/staging/iio/impedance-analyzer/ad5933.c +++ b/drivers/staging/iio/impedance-analyzer/ad5933.c @@ -629,7 +629,7 @@ static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev) struct iio_buffer *buffer; buffer = iio_kfifo_allocate(indio_dev); - if (buffer) + if (!buffer) return -ENOMEM; iio_device_attach_buffer(indio_dev, buffer); diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c index 09ef5fb8bae6..236ed66f116a 100644 --- a/drivers/staging/imx-drm/imx-drm-core.c +++ b/drivers/staging/imx-drm/imx-drm-core.c @@ -88,9 +88,9 @@ static int imx_drm_driver_unload(struct drm_device *drm) imx_drm_device_put(); - drm_vblank_cleanup(imxdrm->drm); - drm_kms_helper_poll_fini(imxdrm->drm); - drm_mode_config_cleanup(imxdrm->drm); + drm_vblank_cleanup(drm); + drm_kms_helper_poll_fini(drm); + drm_mode_config_cleanup(drm); return 0; } @@ -142,19 +142,19 @@ EXPORT_SYMBOL_GPL(imx_drm_crtc_panel_format); int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc) { - return drm_vblank_get(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe); + return drm_vblank_get(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe); } EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_get); void imx_drm_crtc_vblank_put(struct imx_drm_crtc *imx_drm_crtc) { - drm_vblank_put(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe); + drm_vblank_put(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe); } EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_put); void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc) { - drm_handle_vblank(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe); + drm_handle_vblank(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe); } EXPORT_SYMBOL_GPL(imx_drm_handle_vblank); @@ -370,29 +370,6 @@ static void imx_drm_connector_unregister( } /* - * register a crtc to the drm core - */ -static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc) -{ - struct imx_drm_device *imxdrm = __imx_drm_device(); - int ret; - - ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256); - if (ret) - return ret; - - drm_crtc_helper_add(imx_drm_crtc->crtc, - imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs); - - drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc, - imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs); - - drm_mode_group_reinit(imxdrm->drm); - - return 0; -} - -/* * Called by the CRTC driver when all CRTCs are registered. This * puts all the pieces together and initializes the driver. 
* Once this is called no more CRTCs can be registered since @@ -424,15 +401,15 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags) mutex_lock(&imxdrm->mutex); - drm_kms_helper_poll_init(imxdrm->drm); + drm_kms_helper_poll_init(drm); /* setup the grouping for the legacy output */ - ret = drm_mode_group_init_legacy_group(imxdrm->drm, - &imxdrm->drm->primary->mode_group); + ret = drm_mode_group_init_legacy_group(drm, + &drm->primary->mode_group); if (ret) goto err_kms; - ret = drm_vblank_init(imxdrm->drm, MAX_CRTC); + ret = drm_vblank_init(drm, MAX_CRTC); if (ret) goto err_kms; @@ -441,7 +418,7 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags) * by drm timer once a current process gives up ownership of * vblank event.(after drm_vblank_put function is called) */ - imxdrm->drm->vblank_disable_allowed = true; + drm->vblank_disable_allowed = true; if (!imx_drm_device_get()) { ret = -EINVAL; @@ -536,10 +513,18 @@ int imx_drm_add_crtc(struct drm_crtc *crtc, *new_crtc = imx_drm_crtc; - ret = imx_drm_crtc_register(imx_drm_crtc); + ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256); if (ret) goto err_register; + drm_crtc_helper_add(crtc, + imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs); + + drm_crtc_init(imxdrm->drm, crtc, + imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs); + + drm_mode_group_reinit(imxdrm->drm); + imx_drm_update_possible_crtcs(); mutex_unlock(&imxdrm->mutex); diff --git a/drivers/staging/imx-drm/imx-hdmi.c b/drivers/staging/imx-drm/imx-hdmi.c index f3a1f5e2e492..62ce0e86f14b 100644 --- a/drivers/staging/imx-drm/imx-hdmi.c +++ b/drivers/staging/imx-drm/imx-hdmi.c @@ -16,6 +16,7 @@ #include <linux/delay.h> #include <linux/err.h> #include <linux/clk.h> +#include <linux/hdmi.h> #include <linux/regmap.h> #include <linux/mfd/syscon.h> #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> @@ -52,11 +53,6 @@ enum hdmi_datamap { YCbCr422_12B = 0x12, }; -enum hdmi_colorimetry { - ITU601, - ITU709, -}; - enum imx_hdmi_devtype { IMX6Q_HDMI, IMX6DL_HDMI, @@ -489,12 +485,12 @@ static void imx_hdmi_update_csc_coeffs(struct imx_hdmi *hdmi) if (is_color_space_conversion(hdmi)) { if (hdmi->hdmi_data.enc_out_format == RGB) { - if (hdmi->hdmi_data.colorimetry == ITU601) + if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601) csc_coeff = &csc_coeff_rgb_out_eitu601; else csc_coeff = &csc_coeff_rgb_out_eitu709; } else if (hdmi->hdmi_data.enc_in_format == RGB) { - if (hdmi->hdmi_data.colorimetry == ITU601) + if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601) csc_coeff = &csc_coeff_rgb_in_eitu601; else csc_coeff = &csc_coeff_rgb_in_eitu709; @@ -1140,16 +1136,16 @@ static void hdmi_config_AVI(struct imx_hdmi *hdmi) /* Set up colorimetry */ if (hdmi->hdmi_data.enc_out_format == XVYCC444) { colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO; - if (hdmi->hdmi_data.colorimetry == ITU601) + if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601) ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601; - else /* hdmi->hdmi_data.colorimetry == ITU709 */ + else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/ ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709; } else if (hdmi->hdmi_data.enc_out_format != RGB) { - if (hdmi->hdmi_data.colorimetry == ITU601) + if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601) colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_SMPTE; - else /* hdmi->hdmi_data.colorimetry == ITU709 */ + else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/ colorimetry = 
HDMI_FC_AVICONF1_COLORIMETRY_ITUR; ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601; } else { /* Carries no data */ @@ -1379,9 +1375,9 @@ static int imx_hdmi_setup(struct imx_hdmi *hdmi, struct drm_display_mode *mode) (hdmi->vic == 21) || (hdmi->vic == 22) || (hdmi->vic == 2) || (hdmi->vic == 3) || (hdmi->vic == 17) || (hdmi->vic == 18)) - hdmi->hdmi_data.colorimetry = ITU601; + hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_601; else - hdmi->hdmi_data.colorimetry = ITU709; + hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709; if ((hdmi->vic == 10) || (hdmi->vic == 11) || (hdmi->vic == 12) || (hdmi->vic == 13) || diff --git a/drivers/staging/lustre/TODO b/drivers/staging/lustre/TODO index 22742d6d62a8..0a2b6cb3775e 100644 --- a/drivers/staging/lustre/TODO +++ b/drivers/staging/lustre/TODO @@ -9,5 +9,6 @@ * Other minor misc cleanups... Please send any patches to Greg Kroah-Hartman <greg@kroah.com>, Andreas Dilger -<andreas.dilger@intel.com> and Peng Tao <tao.peng@emc.com>. CCing -hpdd-discuss <hpdd-discuss@lists.01.org> would be great too. +<andreas.dilger@intel.com>, Oleg Drokin <oleg.drokin@intel.com> and +Peng Tao <tao.peng@emc.com>. CCing hpdd-discuss <hpdd-discuss@lists.01.org> +would be great too. diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h index 596a15fc8996..037ae8a6d531 100644 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h +++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h @@ -61,6 +61,8 @@ struct kuc_hdr { __u16 kuc_msglen; /* Including header */ } __attribute__((aligned(sizeof(__u64)))); +#define KUC_CHANGELOG_MSG_MAXSIZE (sizeof(struct kuc_hdr)+CR_MAXSIZE) + #define KUC_MAGIC 0x191C /*Lustre9etLinC */ #define KUC_FL_BLOCK 0x01 /* Wait for send */ diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h index d0d942ced01a..dddccca120c9 100644 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h +++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h @@ -120,7 +120,7 @@ do { \ do { \ LASSERT(!in_interrupt() || \ ((size) <= LIBCFS_VMALLOC_SIZE && \ - ((mask) & GFP_ATOMIC)) != 0); \ + ((mask) & __GFP_WAIT) == 0)); \ } while (0) #define LIBCFS_ALLOC_POST(ptr, size) \ diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c index 93648632ba26..6f58ead20393 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c @@ -529,7 +529,7 @@ kiblnd_kvaddr_to_page (unsigned long vaddr) { struct page *page; - if (is_vmalloc_addr(vaddr)) { + if (is_vmalloc_addr((void *)vaddr)) { page = vmalloc_to_page ((void *)vaddr); LASSERT (page != NULL); return page; diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c index 68a4f52ec998..b7b53b579c85 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c @@ -924,7 +924,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) int ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) { - int mpflag = 0; + int mpflag = 1; int type = lntmsg->msg_type; lnet_process_id_t target = lntmsg->msg_target; unsigned int payload_niov = lntmsg->msg_niov; @@ -993,8 +993,9 @@ ksocknal_send(lnet_ni_t 
*ni, void *private, lnet_msg_t *lntmsg) /* The first fragment will be set later in pro_pack */ rc = ksocknal_launch_packet(ni, tx, target); - if (lntmsg->msg_vmflush) + if (!mpflag) cfs_memory_pressure_restore(mpflag); + if (rc == 0) return (0); diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h index 6b6c0240e824..7893d83e131f 100644 --- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h +++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h @@ -760,7 +760,8 @@ static inline void hsm_set_cl_error(int *flags, int error) *flags |= (error << CLF_HSM_ERR_L); } -#define CR_MAXSIZE cfs_size_round(2*NAME_MAX + 1 + sizeof(struct changelog_rec)) +#define CR_MAXSIZE cfs_size_round(2*NAME_MAX + 1 + \ + sizeof(struct changelog_ext_rec)) struct changelog_rec { __u16 cr_namelen; diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c index 22d0acc95bc5..52b7731bcc38 100644 --- a/drivers/staging/lustre/lustre/llite/dir.c +++ b/drivers/staging/lustre/lustre/llite/dir.c @@ -1086,7 +1086,7 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl) break; case Q_GETQUOTA: if (((type == USRQUOTA && - uid_eq(current_euid(), make_kuid(&init_user_ns, id))) || + !uid_eq(current_euid(), make_kuid(&init_user_ns, id))) || (type == GRPQUOTA && !in_egroup_p(make_kgid(&init_user_ns, id)))) && (!cfs_capable(CFS_CAP_SYS_ADMIN) || diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c index d1ad91c34ddc..83013927e131 100644 --- a/drivers/staging/lustre/lustre/mdc/mdc_request.c +++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c @@ -1430,7 +1430,7 @@ static struct kuc_hdr *changelog_kuc_hdr(char *buf, int len, int flags) { struct kuc_hdr *lh = (struct kuc_hdr *)buf; - LASSERT(len <= CR_MAXSIZE); + LASSERT(len <= KUC_CHANGELOG_MSG_MAXSIZE); lh->kuc_magic = KUC_MAGIC; lh->kuc_transport = KUC_TRANSPORT_CHANGELOG; @@ -1503,7 +1503,7 @@ static int mdc_changelog_send_thread(void *csdata) CDEBUG(D_CHANGELOG, "changelog to fp=%p start "LPU64"\n", cs->cs_fp, cs->cs_startrec); - OBD_ALLOC(cs->cs_buf, CR_MAXSIZE); + OBD_ALLOC(cs->cs_buf, KUC_CHANGELOG_MSG_MAXSIZE); if (cs->cs_buf == NULL) GOTO(out, rc = -ENOMEM); @@ -1540,7 +1540,7 @@ out: if (ctxt) llog_ctxt_put(ctxt); if (cs->cs_buf) - OBD_FREE(cs->cs_buf, CR_MAXSIZE); + OBD_FREE(cs->cs_buf, KUC_CHANGELOG_MSG_MAXSIZE); OBD_FREE_PTR(cs); return rc; } diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c index eedffed17e39..31b269a5fff7 100644 --- a/drivers/staging/netlogic/xlr_net.c +++ b/drivers/staging/netlogic/xlr_net.c @@ -307,7 +307,7 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb, } static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb, - void *accel_priv) + void *accel_priv, select_queue_fallback_t fallback) { return (u16)smp_processor_id(); } @@ -892,6 +892,11 @@ static int xlr_setup_mdio(struct xlr_net_priv *priv, priv->mii_bus->write = xlr_mii_write; priv->mii_bus->parent = &pdev->dev; priv->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); + if (priv->mii_bus->irq == NULL) { + pr_err("irq alloc failed\n"); + mdiobus_free(priv->mii_bus); + return -ENOMEM; + } priv->mii_bus->irq[priv->phy_addr] = priv->ndev->irq; /* Scan only the enabled address */ diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c index 47e0a91238a1..5a001d9b4252 
100644 --- a/drivers/staging/octeon-usb/octeon-hcd.c +++ b/drivers/staging/octeon-usb/octeon-hcd.c @@ -275,13 +275,6 @@ enum cvmx_usb_pipe_flags { */ #define MAX_TRANSFER_PACKETS ((1<<10)-1) -enum { - USB_CLOCK_TYPE_REF_12, - USB_CLOCK_TYPE_REF_24, - USB_CLOCK_TYPE_REF_48, - USB_CLOCK_TYPE_CRYSTAL_12, -}; - /** * Logical transactions may take numerous low level * transactions, especially when splits are concerned. This @@ -471,19 +464,6 @@ struct octeon_hcd { /* Returns the IO address to push/pop stuff data from the FIFOs */ #define USB_FIFO_ADDRESS(channel, usb_index) (CVMX_USBCX_GOTGCTL(usb_index) + ((channel)+1)*0x1000) -static int octeon_usb_get_clock_type(void) -{ - switch (cvmx_sysinfo_get()->board_type) { - case CVMX_BOARD_TYPE_BBGW_REF: - case CVMX_BOARD_TYPE_LANAI2_A: - case CVMX_BOARD_TYPE_LANAI2_U: - case CVMX_BOARD_TYPE_LANAI2_G: - case CVMX_BOARD_TYPE_UBNT_E100: - return USB_CLOCK_TYPE_CRYSTAL_12; - } - return USB_CLOCK_TYPE_REF_48; -} - /** * Read a USB 32bit CSR. It performs the necessary address swizzle * for 32bit CSRs and logs the value in a readable format if @@ -582,37 +562,6 @@ static inline int __cvmx_usb_get_data_pid(struct cvmx_usb_pipe *pipe) return 0; /* Data0 */ } - -/** - * Return the number of USB ports supported by this Octeon - * chip. If the chip doesn't support USB, or is not supported - * by this API, a zero will be returned. Most Octeon chips - * support one usb port, but some support two ports. - * cvmx_usb_initialize() must be called on independent - * struct cvmx_usb_state. - * - * Returns: Number of port, zero if usb isn't supported - */ -static int cvmx_usb_get_num_ports(void) -{ - int arch_ports = 0; - - if (OCTEON_IS_MODEL(OCTEON_CN56XX)) - arch_ports = 1; - else if (OCTEON_IS_MODEL(OCTEON_CN52XX)) - arch_ports = 2; - else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) - arch_ports = 1; - else if (OCTEON_IS_MODEL(OCTEON_CN31XX)) - arch_ports = 1; - else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) - arch_ports = 1; - else - arch_ports = 0; - - return arch_ports; -} - /** * Initialize a USB port for use. This must be called before any * other access to the Octeon USB port is made. The port starts @@ -628,41 +577,16 @@ static int cvmx_usb_get_num_ports(void) * Returns: 0 or a negative error code. 
*/ static int cvmx_usb_initialize(struct cvmx_usb_state *usb, - int usb_port_number) + int usb_port_number, + enum cvmx_usb_initialize_flags flags) { union cvmx_usbnx_clk_ctl usbn_clk_ctl; union cvmx_usbnx_usbp_ctl_status usbn_usbp_ctl_status; - enum cvmx_usb_initialize_flags flags = 0; int i; /* At first allow 0-1 for the usb port number */ if ((usb_port_number < 0) || (usb_port_number > 1)) return -EINVAL; - /* For all chips except 52XX there is only one port */ - if (!OCTEON_IS_MODEL(OCTEON_CN52XX) && (usb_port_number > 0)) - return -EINVAL; - /* Try to determine clock type automatically */ - if (octeon_usb_get_clock_type() == USB_CLOCK_TYPE_CRYSTAL_12) { - /* Only 12 MHZ crystals are supported */ - flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI; - } else { - flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND; - - switch (octeon_usb_get_clock_type()) { - case USB_CLOCK_TYPE_REF_12: - flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ; - break; - case USB_CLOCK_TYPE_REF_24: - flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ; - break; - case USB_CLOCK_TYPE_REF_48: - flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ; - break; - default: - return -EINVAL; - break; - } - } memset(usb, 0, sizeof(*usb)); usb->init_flags = flags; @@ -3431,7 +3355,6 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, return 0; } - static const struct hc_driver octeon_hc_driver = { .description = "Octeon USB", .product_desc = "Octeon Host Controller", @@ -3448,15 +3371,74 @@ static const struct hc_driver octeon_hc_driver = { .hub_control = octeon_usb_hub_control, }; - -static int octeon_usb_driver_probe(struct device *dev) +static int octeon_usb_probe(struct platform_device *pdev) { int status; - int usb_num = to_platform_device(dev)->id; - int irq = platform_get_irq(to_platform_device(dev), 0); + int initialize_flags; + int usb_num; + struct resource *res_mem; + struct device_node *usbn_node; + int irq = platform_get_irq(pdev, 0); + struct device *dev = &pdev->dev; struct octeon_hcd *priv; struct usb_hcd *hcd; unsigned long flags; + u32 clock_rate = 48000000; + bool is_crystal_clock = false; + const char *clock_type; + int i; + + if (dev->of_node == NULL) { + dev_err(dev, "Error: empty of_node\n"); + return -ENXIO; + } + usbn_node = dev->of_node->parent; + + i = of_property_read_u32(usbn_node, + "refclk-frequency", &clock_rate); + if (i) { + dev_err(dev, "No USBN \"refclk-frequency\"\n"); + return -ENXIO; + } + switch (clock_rate) { + case 12000000: + initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ; + break; + case 24000000: + initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ; + break; + case 48000000: + initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ; + break; + default: + dev_err(dev, "Illebal USBN \"refclk-frequency\" %u\n", clock_rate); + return -ENXIO; + + } + + i = of_property_read_string(usbn_node, + "refclk-type", &clock_type); + + if (!i && strcmp("crystal", clock_type) == 0) + is_crystal_clock = true; + + if (is_crystal_clock) + initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI; + else + initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND; + + res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res_mem == NULL) { + dev_err(dev, "found no memory resource\n"); + return -ENXIO; + } + usb_num = (res_mem->start >> 44) & 1; + + if (irq < 0) { + /* Defective device tree, but we know how to fix it. */ + irq_hw_number_t hwirq = usb_num ? 
(1 << 6) + 17 : 56; + irq = irq_create_mapping(NULL, hwirq); + } /* * Set the DMA mask to 64bits so we get buffers already translated for @@ -3465,6 +3447,26 @@ static int octeon_usb_driver_probe(struct device *dev) dev->coherent_dma_mask = ~0; dev->dma_mask = &dev->coherent_dma_mask; + /* + * Only cn52XX and cn56XX have DWC_OTG USB hardware and the + * IOB priority registers. Under heavy network load USB + * hardware can be starved by the IOB causing a crash. Give + * it a priority boost if it has been waiting more than 400 + * cycles to avoid this situation. + * + * Testing indicates that a cnt_val of 8192 is not sufficient, + * but no failures are seen with 4096. We choose a value of + * 400 to give a safety factor of 10. + */ + if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) { + union cvmx_iob_n2c_l2c_pri_cnt pri_cnt; + + pri_cnt.u64 = 0; + pri_cnt.s.cnt_enb = 1; + pri_cnt.s.cnt_val = 400; + cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64); + } + hcd = usb_create_hcd(&octeon_hc_driver, dev, dev_name(dev)); if (!hcd) { dev_dbg(dev, "Failed to allocate memory for HCD\n"); @@ -3478,7 +3480,7 @@ static int octeon_usb_driver_probe(struct device *dev) tasklet_init(&priv->dequeue_tasklet, octeon_usb_urb_dequeue_work, (unsigned long)priv); INIT_LIST_HEAD(&priv->dequeue_list); - status = cvmx_usb_initialize(&priv->usb, usb_num); + status = cvmx_usb_initialize(&priv->usb, usb_num, initialize_flags); if (status) { dev_dbg(dev, "USB initialization failed with %d\n", status); kfree(hcd); @@ -3492,7 +3494,7 @@ static int octeon_usb_driver_probe(struct device *dev) cvmx_usb_poll(&priv->usb); spin_unlock_irqrestore(&priv->lock, flags); - status = usb_add_hcd(hcd, irq, IRQF_SHARED); + status = usb_add_hcd(hcd, irq, 0); if (status) { dev_dbg(dev, "USB add HCD failed with %d\n", status); kfree(hcd); @@ -3500,14 +3502,15 @@ static int octeon_usb_driver_probe(struct device *dev) } device_wakeup_enable(hcd->self.controller); - dev_dbg(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq); + dev_info(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq); return 0; } -static int octeon_usb_driver_remove(struct device *dev) +static int octeon_usb_remove(struct platform_device *pdev) { int status; + struct device *dev = &pdev->dev; struct usb_hcd *hcd = dev_get_drvdata(dev); struct octeon_hcd *priv = hcd_to_octeon(hcd); unsigned long flags; @@ -3525,85 +3528,41 @@ static int octeon_usb_driver_remove(struct device *dev) return 0; } -static struct device_driver octeon_usb_driver = { - .name = "OcteonUSB", - .bus = &platform_bus_type, - .probe = octeon_usb_driver_probe, - .remove = octeon_usb_driver_remove, +static struct of_device_id octeon_usb_match[] = { + { + .compatible = "cavium,octeon-5750-usbc", + }, + {}, }; +static struct platform_driver octeon_usb_driver = { + .driver = { + .name = "OcteonUSB", + .owner = THIS_MODULE, + .of_match_table = octeon_usb_match, + }, + .probe = octeon_usb_probe, + .remove = octeon_usb_remove, +}; -#define MAX_USB_PORTS 10 -static struct platform_device *pdev_glob[MAX_USB_PORTS]; -static int octeon_usb_registered; -static int __init octeon_usb_module_init(void) +static int __init octeon_usb_driver_init(void) { - int num_devices = cvmx_usb_get_num_ports(); - int device; - - if (usb_disabled() || num_devices == 0) - return -ENODEV; - - if (driver_register(&octeon_usb_driver)) - return -ENOMEM; - - octeon_usb_registered = 1; - - /* - * Only cn52XX and cn56XX have DWC_OTG USB hardware and the - * IOB priority registers. 
Under heavy network load USB - * hardware can be starved by the IOB causing a crash. Give - * it a priority boost if it has been waiting more than 400 - * cycles to avoid this situation. - * - * Testing indicates that a cnt_val of 8192 is not sufficient, - * but no failures are seen with 4096. We choose a value of - * 400 to give a safety factor of 10. - */ - if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) { - union cvmx_iob_n2c_l2c_pri_cnt pri_cnt; - - pri_cnt.u64 = 0; - pri_cnt.s.cnt_enb = 1; - pri_cnt.s.cnt_val = 400; - cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64); - } - - for (device = 0; device < num_devices; device++) { - struct resource irq_resource; - struct platform_device *pdev; - memset(&irq_resource, 0, sizeof(irq_resource)); - irq_resource.start = (device == 0) ? OCTEON_IRQ_USB0 : OCTEON_IRQ_USB1; - irq_resource.end = irq_resource.start; - irq_resource.flags = IORESOURCE_IRQ; - pdev = platform_device_register_simple((char *)octeon_usb_driver. name, device, &irq_resource, 1); - if (IS_ERR(pdev)) { - driver_unregister(&octeon_usb_driver); - octeon_usb_registered = 0; - return PTR_ERR(pdev); - } - if (device < MAX_USB_PORTS) - pdev_glob[device] = pdev; + if (usb_disabled()) + return 0; - } - return 0; + return platform_driver_register(&octeon_usb_driver); } +module_init(octeon_usb_driver_init); -static void __exit octeon_usb_module_cleanup(void) +static void __exit octeon_usb_driver_exit(void) { - int i; + if (usb_disabled()) + return; - for (i = 0; i < MAX_USB_PORTS; i++) - if (pdev_glob[i]) { - platform_device_unregister(pdev_glob[i]); - pdev_glob[i] = NULL; - } - if (octeon_usb_registered) - driver_unregister(&octeon_usb_driver); + platform_driver_unregister(&octeon_usb_driver); } +module_exit(octeon_usb_driver_exit); MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>"); -MODULE_DESCRIPTION("Cavium Networks Octeon USB Host driver."); -module_init(octeon_usb_module_init); -module_exit(octeon_usb_module_cleanup); +MODULE_AUTHOR("Cavium, Inc. <support@cavium.com>"); +MODULE_DESCRIPTION("Cavium Inc. 
OCTEON USB Host driver.");
diff --git a/drivers/staging/ozwpan/ozproto.c b/drivers/staging/ozwpan/ozproto.c
index cb060364dfe7..5d965cf06d59 100644
--- a/drivers/staging/ozwpan/ozproto.c
+++ b/drivers/staging/ozwpan/ozproto.c
@@ -668,8 +668,8 @@ void oz_binding_add(const char *net_dev)
 if (binding) {
 binding->ptype.type = __constant_htons(OZ_ETHERTYPE);
 binding->ptype.func = oz_pkt_recv;
- memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
 if (net_dev && *net_dev) {
+ memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
 oz_dbg(ON, "Adding binding: %s\n", net_dev);
 binding->ptype.dev = dev_get_by_name(&init_net, net_dev);
@@ -680,6 +680,7 @@ void oz_binding_add(const char *net_dev)
 }
 } else {
 oz_dbg(ON, "Binding to all netcards\n");
+ memset(binding->name, 0, OZ_MAX_BINDING_LEN);
 binding->ptype.dev = NULL;
 }
 if (binding) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 153ec61493ab..96df62f95b6b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -912,12 +912,12 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
 unsigned char *pbuf;
 u32 wpa_ielen = 0;
 u8 *pbssid = GetAddr3Ptr(pframe);
- u32 hidden_ssid = 0;
 struct HT_info_element *pht_info = NULL;
 struct rtw_ieee80211_ht_cap *pht_cap = NULL;
 u32 bcn_channel;
 unsigned short ht_cap_info;
 unsigned char ht_info_infos_0;
+ int ssid_len;
 if (is_client_associated_to_ap(Adapter) == false)
 return true;
@@ -999,21 +999,15 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
 }
 /* checking SSID */
+ ssid_len = 0;
 p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _SSID_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
- if (p == NULL) {
- DBG_88E("%s marc: cannot find SSID for survey event\n", __func__);
- hidden_ssid = true;
- } else {
- hidden_ssid = false;
- }
-
- if ((NULL != p) && (false == hidden_ssid && (*(p + 1)))) {
- memcpy(bssid->Ssid.Ssid, (p + 2), *(p + 1));
- bssid->Ssid.SsidLength = *(p + 1);
- } else {
- bssid->Ssid.SsidLength = 0;
- bssid->Ssid.Ssid[0] = '\0';
+ if (p) {
+ ssid_len = *(p + 1);
+ if (ssid_len > NDIS_802_11_LENGTH_SSID)
+ ssid_len = 0;
 }
+ memcpy(bssid->Ssid.Ssid, (p + 2), ssid_len);
+ bssid->Ssid.SsidLength = ssid_len;
 RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
 ("%s bssid.Ssid.Ssid:%s bssid.Ssid.SsidLength:%d "
 "cur_network->network.Ssid.Ssid:%s len:%d\n", __func__, bssid->Ssid.Ssid,
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index dec992569476..4ad80ae1067f 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -2500,7 +2500,7 @@ static int rtw_mp_ioctl_hdl(struct net_device *dev, struct iw_request_info *info
 ("rtw_mp_ioctl_hdl: subcode [%d], len[%d], buffer_len[%d]\r\n",
 poidparam->subcode, poidparam->len, len));
- if (poidparam->subcode >= MAX_MP_IOCTL_SUBCODE) {
+ if (poidparam->subcode >= ARRAY_SIZE(mp_ioctl_hdl)) {
 RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("no matching drvext subcodes\r\n"));
 ret = -EINVAL;
 goto _rtw_mp_ioctl_hdl_exit;
@@ -3164,9 +3164,7 @@ static int rtw_p2p_get_go_device_address(struct net_device *dev,
 u8 *p2pie;
 uint p2pielen = 0, attr_contentlen = 0;
 u8 attr_content[100] = {0x00};
-
- u8 go_devadd_str[17 + 10] = {0x00};
- /* +10 is for the str "go_devadd =", we have to clear it at wrqu->data.pointer */
+ u8 go_devadd_str[17 + 12] = {};
 /* Commented by Albert 20121209 */
 /* The
input data is the GO's interface address which the application wants to know its device address. */ @@ -3223,12 +3221,12 @@ static int rtw_p2p_get_go_device_address(struct net_device *dev, spin_unlock_bh(&pmlmepriv->scanned_queue.lock); if (!blnMatch) - sprintf(go_devadd_str, "\n\ndev_add = NULL"); + snprintf(go_devadd_str, sizeof(go_devadd_str), "\n\ndev_add = NULL"); else - sprintf(go_devadd_str, "\n\ndev_add =%.2X:%.2X:%.2X:%.2X:%.2X:%.2X", + snprintf(go_devadd_str, sizeof(go_devadd_str), "\n\ndev_add =%.2X:%.2X:%.2X:%.2X:%.2X:%.2X", attr_content[0], attr_content[1], attr_content[2], attr_content[3], attr_content[4], attr_content[5]); - if (copy_to_user(wrqu->data.pointer, go_devadd_str, 10 + 17)) + if (copy_to_user(wrqu->data.pointer, go_devadd_str, sizeof(go_devadd_str))) return -EFAULT; return ret; } diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c index 68f98fa114d2..7c9ee58f47bb 100644 --- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c +++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c @@ -653,7 +653,7 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb) } static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv) + void *accel_priv, select_queue_fallback_t fallback) { struct adapter *padapter = rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &padapter->mlmepriv; diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c index 0a341d6ec51f..a70dcef1419e 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c @@ -53,7 +53,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = { {USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */ /*=== Customer ID ===*/ /****** 8188EUS ********/ - {USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */ + {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */ {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ {} /* Terminating entry */ }; diff --git a/drivers/staging/rtl8821ae/Kconfig b/drivers/staging/rtl8821ae/Kconfig index 2aa5dac2f1df..abccc9dabd65 100644 --- a/drivers/staging/rtl8821ae/Kconfig +++ b/drivers/staging/rtl8821ae/Kconfig @@ -1,6 +1,6 @@ config R8821AE tristate "RealTek RTL8821AE Wireless LAN NIC driver" - depends on PCI && WLAN + depends on PCI && WLAN && MAC80211 depends on m select WIRELESS_EXT select WEXT_PRIV diff --git a/drivers/staging/rtl8821ae/wifi.h b/drivers/staging/rtl8821ae/wifi.h index cfe88a1efd55..76bef93ad70a 100644 --- a/drivers/staging/rtl8821ae/wifi.h +++ b/drivers/staging/rtl8821ae/wifi.h @@ -1414,7 +1414,7 @@ struct rtl_dm { /*88e tx power tracking*/ - u8 bb_swing_idx_ofdm[2]; + u8 bb_swing_idx_ofdm[MAX_RF_PATH]; u8 bb_swing_idx_ofdm_current; u8 bb_swing_idx_ofdm_base[MAX_RF_PATH]; bool bb_swing_flag_Ofdm; diff --git a/drivers/staging/usbip/userspace/libsrc/names.c b/drivers/staging/usbip/userspace/libsrc/names.c index 3c8d28b771e0..81ff8522405c 100644 --- a/drivers/staging/usbip/userspace/libsrc/names.c +++ b/drivers/staging/usbip/userspace/libsrc/names.c @@ -169,14 +169,14 @@ static void *my_malloc(size_t size) struct pool *p; p = calloc(1, sizeof(struct pool)); - if (!p) { - free(p); + if (!p) return NULL; - } p->mem = calloc(1, size); - if (!p->mem) + if (!p->mem) { + free(p); return NULL; + } p->next = pool_head; pool_head = p; diff --git a/drivers/staging/usbip/vhci_sysfs.c b/drivers/staging/usbip/vhci_sysfs.c index 9b51586d11d9..0141bc34d5cc 100644 --- a/drivers/staging/usbip/vhci_sysfs.c +++ 
b/drivers/staging/usbip/vhci_sysfs.c @@ -149,7 +149,8 @@ static int valid_args(__u32 rhport, enum usb_device_speed speed) case USB_SPEED_WIRELESS: break; default: - pr_err("speed %d\n", speed); + pr_err("Failed attach request for unsupported USB speed: %s\n", + usb_speed_string(speed)); return -EINVAL; } diff --git a/drivers/staging/wlags49_h2/wl_wext.c b/drivers/staging/wlags49_h2/wl_wext.c index 4a1ddaf5e00f..187fc060de26 100644 --- a/drivers/staging/wlags49_h2/wl_wext.c +++ b/drivers/staging/wlags49_h2/wl_wext.c @@ -1061,7 +1061,7 @@ static int wireless_set_essid(struct net_device *dev, struct iw_request_info *in goto out; } - if (data->flags != 0 && data->length > HCF_MAX_NAME_LEN + 1) { + if (data->flags != 0 && data->length > HCF_MAX_NAME_LEN) { ret = -EINVAL; goto out; } diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c index e048d6439f4a..cda4d80cfaef 100644 --- a/drivers/target/iscsi/iscsi_target_erl1.c +++ b/drivers/target/iscsi/iscsi_target_erl1.c @@ -507,7 +507,9 @@ int iscsit_handle_status_snack( u32 last_statsn; int found_cmd; - if (conn->exp_statsn > begrun) { + if (!begrun) { + begrun = conn->exp_statsn; + } else if (conn->exp_statsn > begrun) { pr_err("Got Status SNACK Begrun: 0x%08x, RunLength:" " 0x%08x but already got ExpStatSN: 0x%08x on CID:" " %hu.\n", begrun, runlength, conn->exp_statsn, diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 12da9b386169..c3d9df6aaf5f 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -500,7 +500,7 @@ static inline int core_alua_state_lba_dependent( if (segment_mult) { u64 tmp = lba; - start_lba = sector_div(tmp, segment_size * segment_mult); + start_lba = do_div(tmp, segment_size * segment_mult); last_lba = first_lba + segment_size - 1; if (start_lba >= first_lba && diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 2f5d77932c80..3013287a2aaa 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -2009,7 +2009,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, struct t10_reservation *pr_tmpl = &dev->t10_pr; unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL; sense_reason_t ret = TCM_NO_SENSE; - int pr_holder = 0; + int pr_holder = 0, type; if (!se_sess || !se_lun) { pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); @@ -2131,6 +2131,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, ret = TCM_RESERVATION_CONFLICT; goto out; } + type = pr_reg->pr_res_type; spin_lock(&pr_tmpl->registration_lock); /* @@ -2161,6 +2162,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, * Release the calling I_T Nexus registration now.. */ __core_scsi3_free_registration(cmd->se_dev, pr_reg, NULL, 1); + pr_reg = NULL; /* * From spc4r17, section 5.7.11.3 Unregistering @@ -2174,8 +2176,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, * RESERVATIONS RELEASED. 
*/ if (pr_holder && - (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY || - pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) { + (type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY || + type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) { list_for_each_entry(pr_reg_p, &pr_tmpl->registration_list, pr_reg_list) { @@ -2194,7 +2196,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, ret = core_scsi3_update_and_write_aptpl(dev, aptpl); out: - core_scsi3_put_pr_reg(pr_reg); + if (pr_reg) + core_scsi3_put_pr_reg(pr_reg); return ret; } diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index fa3cae393e13..a4489444ffbc 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -1074,12 +1074,19 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, struct scatterlist *psg; void *paddr, *addr; unsigned int i, len, left; + unsigned int offset = 0; left = sectors * dev->prot_length; for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { len = min(psg->length, left); + if (offset >= sg->length) { + sg = sg_next(sg); + offset = 0; + sg_off = sg->offset; + } + paddr = kmap_atomic(sg_page(psg)) + psg->offset; addr = kmap_atomic(sg_page(sg)) + sg_off; @@ -1089,6 +1096,7 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, memcpy(addr, paddr, len); left -= len; + offset += len; kunmap_atomic(paddr); kunmap_atomic(addr); } diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 43c5ca9878bc..3bebc71ea033 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -440,8 +440,8 @@ check_scsi_name: padding = ((-scsi_target_len) & 3); if (padding) scsi_target_len += padding; - if (scsi_name_len > 256) - scsi_name_len = 256; + if (scsi_target_len > 256) + scsi_target_len = 256; buf[off-1] = scsi_target_len; off += scsi_target_len; diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index c50fd9f11aab..24b4f65d8777 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -669,9 +669,6 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) return; } - if (!success) - cmd->transport_state |= CMD_T_FAILED; - /* * Check for case where an explicit ABORT_TASK has been received * and transport_wait_for_tasks() will be waiting for completion.. 
@@ -681,7 +678,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 complete(&cmd->t_transport_stop_comp);
 return;
- } else if (cmd->transport_state & CMD_T_FAILED) {
+ } else if (!success) {
 INIT_WORK(&cmd->work, target_complete_failure_work);
 } else {
 INIT_WORK(&cmd->work, target_complete_ok_work);
 }
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index 6496872e2e47..b01659bd4f7c 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -255,13 +255,7 @@ static int __init hvc_opal_init(void)
 /* Register as a vio device to receive callbacks */
 return platform_driver_register(&hvc_opal_driver);
 }
-module_init(hvc_opal_init);
-
-static void __exit hvc_opal_exit(void)
-{
- platform_driver_unregister(&hvc_opal_driver);
-}
-module_exit(hvc_opal_exit);
+device_initcall(hvc_opal_init);
 static void udbg_opal_putc(char c)
 {
diff --git a/drivers/tty/hvc/hvc_rtas.c b/drivers/tty/hvc/hvc_rtas.c
index 0069bb86ba49..08c87920b74a 100644
--- a/drivers/tty/hvc/hvc_rtas.c
+++ b/drivers/tty/hvc/hvc_rtas.c
@@ -102,17 +102,7 @@ static int __init hvc_rtas_init(void)
 return 0;
 }
-module_init(hvc_rtas_init);
-
-/* This will tear down the tty portion of the driver */
-static void __exit hvc_rtas_exit(void)
-{
- /* Really the fun isn't over until the worker thread breaks down and
- * the tty cleans up */
- if (hvc_rtas_dev)
- hvc_remove(hvc_rtas_dev);
-}
-module_exit(hvc_rtas_exit);
+device_initcall(hvc_rtas_init);
 /* This will happen prior to module init. There is no tty at this time? */
 static int __init hvc_rtas_console_init(void)
diff --git a/drivers/tty/hvc/hvc_udbg.c b/drivers/tty/hvc/hvc_udbg.c
index 72228276fe31..9cf573d06a29 100644
--- a/drivers/tty/hvc/hvc_udbg.c
+++ b/drivers/tty/hvc/hvc_udbg.c
@@ -80,14 +80,7 @@ static int __init hvc_udbg_init(void)
 return 0;
 }
-module_init(hvc_udbg_init);
-
-static void __exit hvc_udbg_exit(void)
-{
- if (hvc_udbg_dev)
- hvc_remove(hvc_udbg_dev);
-}
-module_exit(hvc_udbg_exit);
+device_initcall(hvc_udbg_init);
 static int __init hvc_udbg_console_init(void)
 {
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 636c9baad7a5..2dc2831840ca 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -561,18 +561,7 @@ static int __init xen_hvc_init(void)
 #endif
 return r;
 }
-
-static void __exit xen_hvc_fini(void)
-{
- struct xencons_info *entry, *next;
-
- if (list_empty(&xenconsoles))
- return;
-
- list_for_each_entry_safe(entry, next, &xenconsoles, list) {
- xen_console_remove(entry);
- }
-}
+device_initcall(xen_hvc_init);
 static int xen_cons_init(void)
 {
@@ -598,10 +587,6 @@ static int xen_cons_init(void)
 hvc_instantiate(HVC_COOKIE, 0, ops);
 return 0;
 }
-
-
-module_init(xen_hvc_init);
-module_exit(xen_hvc_fini);
 console_initcall(xen_cons_init);
 #ifdef CONFIG_EARLY_PRINTK
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index f34461c5f14e..2ebe47b78a3e 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -1090,6 +1090,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
 {
 unsigned int addr = 0;
 unsigned int modem = 0;
+ unsigned int brk = 0;
 struct gsm_dlci *dlci;
 int len = clen;
 u8 *dp = data;
@@ -1116,6 +1117,16 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
 if (len == 0)
 return;
 }
+ len--;
+ if (len > 0) {
+ while (gsm_read_ea(&brk, *dp++) == 0) {
+ len--;
+ if (len == 0)
+ return;
+ }
+ modem <<= 7;
+ modem |= (brk & 0x7f);
+ }
 tty = tty_port_tty_get(&dlci->port);
gsm_process_modem(tty, dlci, modem, clen); if (tty) { diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index cb8017aa4434..d15624c1b751 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -817,8 +817,7 @@ static void process_echoes(struct tty_struct *tty) struct n_tty_data *ldata = tty->disc_data; size_t echoed; - if ((!L_ECHO(tty) && !L_ECHONL(tty)) || - ldata->echo_mark == ldata->echo_tail) + if (ldata->echo_mark == ldata->echo_tail) return; mutex_lock(&ldata->output_lock); @@ -1244,7 +1243,8 @@ n_tty_receive_signal_char(struct tty_struct *tty, int signal, unsigned char c) if (L_ECHO(tty)) { echo_char(c, tty); commit_echoes(tty); - } + } else + process_echoes(tty); isig(signal, tty); return; } @@ -1274,7 +1274,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c) if (I_IXON(tty)) { if (c == START_CHAR(tty)) { start_tty(tty); - commit_echoes(tty); + process_echoes(tty); return 0; } if (c == STOP_CHAR(tty)) { @@ -1820,8 +1820,10 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old) * Fix tty hang when I_IXON(tty) is cleared, but the tty * been stopped by STOP_CHAR(tty) before it. */ - if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) + if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) { start_tty(tty); + process_echoes(tty); + } /* The termios change make the tty ready for I/O */ if (waitqueue_active(&tty->write_wait)) @@ -1896,7 +1898,7 @@ err: static inline int input_available_p(struct tty_struct *tty, int poll) { struct n_tty_data *ldata = tty->disc_data; - int amt = poll && !TIME_CHAR(tty) ? MIN_CHAR(tty) : 1; + int amt = poll && !TIME_CHAR(tty) && MIN_CHAR(tty) ? MIN_CHAR(tty) : 1; if (ldata->icanon && !L_EXTPROC(tty)) { if (ldata->canon_head != ldata->read_tail) diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index 61ecd709a722..69932b7556cf 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -2433,6 +2433,24 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, serial_dl_write(up, quot); /* + * XR17V35x UARTs have an extra fractional divisor register (DLD) + * + * We need to recalculate all of the registers, because DLM and DLL + * are already rounded to a whole integer. + * + * When recalculating we use a 32x clock instead of a 16x clock to + * allow 1-bit for rounding in the fractional part. + */ + if (up->port.type == PORT_XR17V35X) { + unsigned int baud_x32 = (port->uartclk * 2) / baud; + u16 quot = baud_x32 / 32; + u8 quot_frac = DIV_ROUND_CLOSEST(baud_x32 % 32, 2); + + serial_dl_write(up, quot); + serial_port_out(port, 0x2, quot_frac & 0xf); + } + + /* * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR * is written without DLAB set, this mode will be disabled. 
*/ diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index faa64e646100..ed3113576740 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -391,7 +391,7 @@ static int dw8250_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int dw8250_suspend(struct device *dev) { struct dw8250_data *data = dev_get_drvdata(dev); @@ -409,7 +409,7 @@ static int dw8250_resume(struct device *dev) return 0; } -#endif /* CONFIG_PM */ +#endif /* CONFIG_PM_SLEEP */ #ifdef CONFIG_PM_RUNTIME static int dw8250_runtime_suspend(struct device *dev) diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 50228eed3b6f..0ff3e3624d4c 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -783,7 +783,8 @@ static int pci_netmos_9900_setup(struct serial_private *priv, { unsigned int bar; - if ((priv->dev->subsystem_device & 0xff00) == 0x3000) { + if ((priv->dev->device != PCI_DEVICE_ID_NETMOS_9865) && + (priv->dev->subsystem_device & 0xff00) == 0x3000) { /* netmos apparently orders BARs by datasheet layout, so serial * ports get BARs 0 and 3 (or 1 and 4 for memmapped) */ diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index fa511ebab67c..77f035158d6c 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -738,9 +738,6 @@ static int serial_omap_startup(struct uart_port *port) return retval; } disable_irq(up->wakeirq); - } else { - dev_info(up->port.dev, "no wakeirq for uart%d\n", - up->port.line); } dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line); @@ -1604,8 +1601,11 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up, flags & SER_RS485_RTS_AFTER_SEND); if (ret < 0) return ret; - } else + } else if (up->rts_gpio == -EPROBE_DEFER) { + return -EPROBE_DEFER; + } else { up->rts_gpio = -EINVAL; + } if (of_property_read_u32_array(np, "rs485-rts-delay", rs485_delay, 2) == 0) { @@ -1687,6 +1687,9 @@ static int serial_omap_probe(struct platform_device *pdev) up->port.iotype = UPIO_MEM; up->port.irq = uartirq; up->wakeirq = wakeirq; + if (!up->wakeirq) + dev_info(up->port.dev, "no wakeirq for uart%d\n", + up->port.line); up->port.regshift = 2; up->port.fifosize = 64; diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c index 49a2ffd101a7..b7bfe24d4ebc 100644 --- a/drivers/tty/serial/sirfsoc_uart.c +++ b/drivers/tty/serial/sirfsoc_uart.c @@ -542,8 +542,10 @@ static void sirfsoc_rx_tmo_process_tl(unsigned long param) wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) | SIRFUART_IO_MODE); - sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count); spin_unlock_irqrestore(&sirfport->rx_lock, flags); + spin_lock(&port->lock); + sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count); + spin_unlock(&port->lock); if (sirfport->rx_io_count == 4) { spin_lock_irqsave(&sirfport->rx_lock, flags); sirfport->rx_io_count = 0; diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index c74a00ad7add..bd2715a9d8e5 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -1267,16 +1267,17 @@ static void pty_line_name(struct tty_driver *driver, int index, char *p) * @p: output buffer of at least 7 bytes * * Generate a name from a driver reference and write it to the output - * buffer. + * buffer. Return the number of bytes written. 
* * Locking: None */ -static void tty_line_name(struct tty_driver *driver, int index, char *p) +static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p) { if (driver->flags & TTY_DRIVER_UNNUMBERED_NODE) - strcpy(p, driver->name); + return sprintf(p, "%s", driver->name); else - sprintf(p, "%s%d", driver->name, index + driver->name_base); + return sprintf(p, "%s%d", driver->name, + index + driver->name_base); } /** @@ -3545,9 +3546,19 @@ static ssize_t show_cons_active(struct device *dev, if (i >= ARRAY_SIZE(cs)) break; } - while (i--) - count += sprintf(buf + count, "%s%d%c", - cs[i]->name, cs[i]->index, i ? ' ':'\n'); + while (i--) { + struct tty_driver *driver; + const char *name = cs[i]->name; + int index = cs[i]->index; + + driver = cs[i]->device(cs[i], &index); + if (driver) { + count += tty_line_name(driver, index, buf + count); + count += sprintf(buf + count, "%c", i ? ' ':'\n'); + } else + count += sprintf(buf + count, "%s%d%c", + name, index, i ? ' ':'\n'); + } console_unlock(); return count; diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 61b1137d7e56..23b5d32954bf 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -1164,6 +1164,8 @@ static void csi_J(struct vc_data *vc, int vpar) scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char, vc->vc_screenbuf_size >> 1); set_origin(vc); + if (CON_IS_VISIBLE(vc)) + update_screen(vc); /* fall through */ case 2: /* erase whole display */ count = vc->vc_cols * vc->vc_rows; diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index 5d01558cef66..ab90a0156828 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c @@ -63,8 +63,10 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids, dynid->id.idProduct = idProduct; dynid->id.match_flags = USB_DEVICE_ID_MATCH_DEVICE; if (fields > 2 && bInterfaceClass) { - if (bInterfaceClass > 255) - return -EINVAL; + if (bInterfaceClass > 255) { + retval = -EINVAL; + goto fail; + } dynid->id.bInterfaceClass = (u8)bInterfaceClass; dynid->id.match_flags |= USB_DEVICE_ID_MATCH_INT_CLASS; @@ -73,17 +75,21 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids, if (fields > 4) { const struct usb_device_id *id = id_table; - if (!id) - return -ENODEV; + if (!id) { + retval = -ENODEV; + goto fail; + } for (; id->match_flags; id++) if (id->idVendor == refVendor && id->idProduct == refProduct) break; - if (id->match_flags) + if (id->match_flags) { dynid->id.driver_info = id->driver_info; - else - return -ENODEV; + } else { + retval = -ENODEV; + goto fail; + } } spin_lock(&dynids->lock); @@ -95,6 +101,10 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids, if (retval) return retval; return count; + +fail: + kfree(dynid); + return retval; } EXPORT_SYMBOL_GPL(usb_store_new_id); diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 199aaea6bfe0..2518c3250750 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1032,7 +1032,6 @@ static int register_root_hub(struct usb_hcd *hcd) dev_name(&usb_dev->dev), retval); return retval; } - usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev); } retval = usb_new_device (usb_dev); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index babba885978d..64ea21971be2 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -128,7 +128,7 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev) return usb_get_intfdata(hdev->actconfig->interface[0]); } -int usb_device_supports_lpm(struct usb_device *udev) +static int usb_device_supports_lpm(struct usb_device 
*udev) { /* USB 2.1 (and greater) devices indicate LPM support through * their USB 2.0 Extended Capabilities BOS descriptor. @@ -149,11 +149,6 @@ int usb_device_supports_lpm(struct usb_device *udev) "Power management will be impacted.\n"); return 0; } - - /* udev is root hub */ - if (!udev->parent) - return 1; - if (udev->parent->lpm_capable) return 1; diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h index c49383669cd8..823857767a16 100644 --- a/drivers/usb/core/usb.h +++ b/drivers/usb/core/usb.h @@ -35,7 +35,6 @@ extern int usb_get_device_descriptor(struct usb_device *dev, unsigned int size); extern int usb_get_bos_descriptor(struct usb_device *dev); extern void usb_release_bos_descriptor(struct usb_device *dev); -extern int usb_device_supports_lpm(struct usb_device *udev); extern char *usb_cache_string(struct usb_device *udev, int index); extern int usb_set_configuration(struct usb_device *dev, int configuration); extern int usb_choose_configuration(struct usb_device *udev); diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c index 8565d87f94b4..1d129884cc39 100644 --- a/drivers/usb/dwc2/core.c +++ b/drivers/usb/dwc2/core.c @@ -216,7 +216,7 @@ static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) int retval = 0; if (!select_phy) - return -ENODEV; + return 0; usbcfg = readl(hsotg->regs + GUSBCFG); diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index f59484d43b35..4d918ed8d343 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -2565,25 +2565,14 @@ static void _dwc2_hcd_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep) { struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); - int is_control = usb_endpoint_xfer_control(&ep->desc); - int is_out = usb_endpoint_dir_out(&ep->desc); - int epnum = usb_endpoint_num(&ep->desc); - struct usb_device *udev; unsigned long flags; dev_dbg(hsotg->dev, "DWC OTG HCD EP RESET: bEndpointAddress=0x%02x\n", ep->desc.bEndpointAddress); - udev = to_usb_device(hsotg->dev); - spin_lock_irqsave(&hsotg->lock, flags); - - usb_settoggle(udev, epnum, is_out, 0); - if (is_control) - usb_settoggle(udev, epnum, !is_out, 0); dwc2_hcd_endpoint_reset(hsotg, ep); - spin_unlock_irqrestore(&hsotg->lock, flags); } diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index d01d0d3f2cf0..eaba547ce26b 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c @@ -124,6 +124,9 @@ static int dwc2_driver_probe(struct platform_device *dev) int retval; int irq; + if (usb_disabled()) + return -ENODEV; + match = of_match_device(dwc2_of_match_table, &dev->dev); if (match && match->data) { params = match->data; diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c index b016d38199f2..eb009a457fb5 100644 --- a/drivers/usb/host/xhci-dbg.c +++ b/drivers/usb/host/xhci-dbg.c @@ -203,12 +203,12 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num) addr, (unsigned int)temp); addr = &ir_set->erst_base; - temp_64 = readq(addr); + temp_64 = xhci_read_64(xhci, addr); xhci_dbg(xhci, " %p: ir_set.erst_base = @%08llx\n", addr, temp_64); addr = &ir_set->erst_dequeue; - temp_64 = readq(addr); + temp_64 = xhci_read_64(xhci, addr); xhci_dbg(xhci, " %p: ir_set.erst_dequeue = @%08llx\n", addr, temp_64); } @@ -412,7 +412,7 @@ void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci) { u64 val; - val = readq(&xhci->op_regs->cmd_ring); + val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n", lower_32_bits(val)); 
xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n", diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 873c272b3ef5..bce4391a0e7d 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1958,7 +1958,7 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci) xhci_warn(xhci, "WARN something wrong with SW event ring " "dequeue ptr.\n"); /* Update HC event ring dequeue pointer */ - temp = readq(&xhci->ir_set->erst_dequeue); + temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); temp &= ERST_PTR_MASK; /* Don't clear the EHB bit (which is RW1C) because * there might be more events to service. @@ -1967,7 +1967,7 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci) xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Write event ring dequeue pointer, " "preserving EHB bit"); - writeq(((u64) deq & (u64) ~ERST_PTR_MASK) | temp, + xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp, &xhci->ir_set->erst_dequeue); } @@ -2269,7 +2269,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Device context base array address = 0x%llx (DMA), %p (virt)", (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); - writeq(dma, &xhci->op_regs->dcbaa_ptr); + xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr); /* * Initialize the ring segment pool. The ring must be a contiguous @@ -2312,13 +2312,13 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) (unsigned long long)xhci->cmd_ring->first_seg->dma); /* Set the address in the Command Ring Control register */ - val_64 = readq(&xhci->op_regs->cmd_ring); + val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | xhci->cmd_ring->cycle_state; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Setting command ring address to 0x%x", val); - writeq(val_64, &xhci->op_regs->cmd_ring); + xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); xhci_dbg_cmd_ptrs(xhci); xhci->lpm_command = xhci_alloc_command(xhci, true, true, flags); @@ -2396,10 +2396,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Set ERST base address for ir_set 0 = 0x%llx", (unsigned long long)xhci->erst.erst_dma_addr); - val_64 = readq(&xhci->ir_set->erst_base); + val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base); val_64 &= ERST_PTR_MASK; val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK); - writeq(val_64, &xhci->ir_set->erst_base); + xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base); /* Set the event ring dequeue address */ xhci_set_hc_event_deq(xhci); diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 3c898c12a06b..04f986d9234f 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -142,6 +142,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) "QUIRK: Resetting on resume"); xhci->quirks |= XHCI_TRUST_TX_LENGTH; } + if (pdev->vendor == PCI_VENDOR_ID_RENESAS && + pdev->device == 0x0015 && + pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG && + pdev->subsystem_device == 0xc0cd) + xhci->quirks |= XHCI_RESET_ON_RESUME; if (pdev->vendor == PCI_VENDOR_ID_VIA) xhci->quirks |= XHCI_RESET_ON_RESUME; } diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index a0b248c34526..0ed64eb68e48 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -307,13 +307,14 @@ static int xhci_abort_cmd_ring(struct 
xhci_hcd *xhci) return 0; } - temp_64 = readq(&xhci->op_regs->cmd_ring); + temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); if (!(temp_64 & CMD_RING_RUNNING)) { xhci_dbg(xhci, "Command ring had been stopped\n"); return 0; } xhci->cmd_ring_state = CMD_RING_STATE_ABORTED; - writeq(temp_64 | CMD_RING_ABORT, &xhci->op_regs->cmd_ring); + xhci_write_64(xhci, temp_64 | CMD_RING_ABORT, + &xhci->op_regs->cmd_ring); /* Section 4.6.1.2 of xHCI 1.0 spec says software should * time the completion od all xHCI commands, including @@ -2864,8 +2865,9 @@ hw_died: /* Clear the event handler busy flag (RW1C); * the event ring should be empty. */ - temp_64 = readq(&xhci->ir_set->erst_dequeue); - writeq(temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue); + temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); + xhci_write_64(xhci, temp_64 | ERST_EHB, + &xhci->ir_set->erst_dequeue); spin_unlock(&xhci->lock); return IRQ_HANDLED; @@ -2877,7 +2879,7 @@ hw_died: */ while (xhci_handle_event(xhci) > 0) {} - temp_64 = readq(&xhci->ir_set->erst_dequeue); + temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); /* If necessary, update the HW's version of the event ring deq ptr. */ if (event_ring_deq != xhci->event_ring->dequeue) { deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, @@ -2892,7 +2894,7 @@ hw_died: /* Clear the event handler busy flag (RW1C); event ring is empty. */ temp_64 |= ERST_EHB; - writeq(temp_64, &xhci->ir_set->erst_dequeue); + xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue); spin_unlock(&xhci->lock); @@ -2965,58 +2967,8 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, } while (1) { - if (room_on_ring(xhci, ep_ring, num_trbs)) { - union xhci_trb *trb = ep_ring->enqueue; - unsigned int usable = ep_ring->enq_seg->trbs + - TRBS_PER_SEGMENT - 1 - trb; - u32 nop_cmd; - - /* - * Section 4.11.7.1 TD Fragments states that a link - * TRB must only occur at the boundary between - * data bursts (eg 512 bytes for 480M). - * While it is possible to split a large fragment - * we don't know the size yet. - * Simplest solution is to fill the trb before the - * LINK with nop commands. - */ - if (num_trbs == 1 || num_trbs <= usable || usable == 0) - break; - - if (ep_ring->type != TYPE_BULK) - /* - * While isoc transfers might have a buffer that - * crosses a 64k boundary it is unlikely. - * Since we can't add NOPs without generating - * gaps in the traffic just hope it never - * happens at the end of the ring. - * This could be fixed by writing a LINK TRB - * instead of the first NOP - however the - * TRB_TYPE_LINK_LE32() calls would all need - * changing to check the ring length. 
- */ - break; - - if (num_trbs >= TRBS_PER_SEGMENT) { - xhci_err(xhci, "Too many fragments %d, max %d\n", - num_trbs, TRBS_PER_SEGMENT - 1); - return -EINVAL; - } - - nop_cmd = cpu_to_le32(TRB_TYPE(TRB_TR_NOOP) | - ep_ring->cycle_state); - ep_ring->num_trbs_free -= usable; - do { - trb->generic.field[0] = 0; - trb->generic.field[1] = 0; - trb->generic.field[2] = 0; - trb->generic.field[3] = nop_cmd; - trb++; - } while (--usable); - ep_ring->enqueue = trb; - if (room_on_ring(xhci, ep_ring, num_trbs)) - break; - } + if (room_on_ring(xhci, ep_ring, num_trbs)) + break; if (ep_ring == xhci->cmd_ring) { xhci_err(xhci, "Do not support expand command ring\n"); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index ad364394885a..6fe577d46fa2 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -611,7 +611,7 @@ int xhci_run(struct usb_hcd *hcd) xhci_dbg(xhci, "Event ring:\n"); xhci_debug_ring(xhci, xhci->event_ring); xhci_dbg_ring_ptrs(xhci, xhci->event_ring); - temp_64 = readq(&xhci->ir_set->erst_dequeue); + temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); temp_64 &= ~ERST_PTR_MASK; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "ERST deq = 64'h%0lx", (long unsigned int) temp_64); @@ -756,11 +756,11 @@ static void xhci_save_registers(struct xhci_hcd *xhci) { xhci->s3.command = readl(&xhci->op_regs->command); xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification); - xhci->s3.dcbaa_ptr = readq(&xhci->op_regs->dcbaa_ptr); + xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); xhci->s3.config_reg = readl(&xhci->op_regs->config_reg); xhci->s3.erst_size = readl(&xhci->ir_set->erst_size); - xhci->s3.erst_base = readq(&xhci->ir_set->erst_base); - xhci->s3.erst_dequeue = readq(&xhci->ir_set->erst_dequeue); + xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base); + xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending); xhci->s3.irq_control = readl(&xhci->ir_set->irq_control); } @@ -769,11 +769,11 @@ static void xhci_restore_registers(struct xhci_hcd *xhci) { writel(xhci->s3.command, &xhci->op_regs->command); writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification); - writeq(xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr); + xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr); writel(xhci->s3.config_reg, &xhci->op_regs->config_reg); writel(xhci->s3.erst_size, &xhci->ir_set->erst_size); - writeq(xhci->s3.erst_base, &xhci->ir_set->erst_base); - writeq(xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue); + xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base); + xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue); writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending); writel(xhci->s3.irq_control, &xhci->ir_set->irq_control); } @@ -783,7 +783,7 @@ static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) u64 val_64; /* step 2: initialize command ring buffer */ - val_64 = readq(&xhci->op_regs->cmd_ring); + val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, xhci->cmd_ring->dequeue) & @@ -792,7 +792,7 @@ static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Setting command ring address to 0x%llx", (long unsigned long) val_64); - writeq(val_64, &xhci->op_regs->cmd_ring); + xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); } /* @@ -3842,7 +3842,7 @@ static int 
xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, if (ret) { return ret; } - temp_64 = readq(&xhci->op_regs->dcbaa_ptr); + temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); xhci_dbg_trace(xhci, trace_xhci_dbg_address, "Op regs DCBAA ptr = %#016llx", temp_64); xhci_dbg_trace(xhci, trace_xhci_dbg_address, @@ -4730,11 +4730,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) struct device *dev = hcd->self.controller; int retval; - /* Limit the block layer scatter-gather lists to half a segment. */ - hcd->self.sg_tablesize = TRBS_PER_SEGMENT / 2; - - /* support to build packet from discontinuous buffers */ - hcd->self.no_sg_constraint = 1; + /* Accept arbitrarily long scatter-gather lists */ + hcd->self.sg_tablesize = ~0; /* XHCI controllers don't stop the ep queue on short packets :| */ hcd->self.no_stop_on_short = 1; @@ -4760,6 +4757,14 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) /* xHCI private pointer was set in xhci_pci_probe for the second * registered roothub. */ + xhci = hcd_to_xhci(hcd); + /* + * Support arbitrarily aligned sg-list entries on hosts without + * TD fragment rules (which are currently unsupported). + */ + if (xhci->hci_version < 0x100) + hcd->self.no_sg_constraint = 1; + return 0; } @@ -4788,6 +4793,9 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) if (xhci->hci_version > 0x96) xhci->quirks |= XHCI_SPURIOUS_SUCCESS; + if (xhci->hci_version < 0x100) + hcd->self.no_sg_constraint = 1; + /* Make sure the HC is halted. */ retval = xhci_halt(xhci); if (retval) diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index f8416639bf31..58ed9d088e63 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -28,17 +28,6 @@ #include <linux/kernel.h> #include <linux/usb/hcd.h> -/* - * Registers should always be accessed with double word or quad word accesses. - * - * Some xHCI implementations may support 64-bit address pointers. Registers - * with 64-bit address pointers should be written to with dword accesses by - * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second. - * xHCI implementations that do not support 64-bit address pointers will ignore - * the high dword, and write order is irrelevant. - */ -#include <asm-generic/io-64-nonatomic-lo-hi.h> - /* Code sharing between pci-quirks and xhci hcd */ #include "xhci-ext-caps.h" #include "pci-quirks.h" @@ -1279,7 +1268,7 @@ union xhci_trb { * since the command ring is 64-byte aligned. * It must also be greater than 16. */ -#define TRBS_PER_SEGMENT 256 +#define TRBS_PER_SEGMENT 64 /* Allow two commands + a link TRB, along with any reserved command TRBs */ #define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3) #define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT*16) @@ -1614,6 +1603,34 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci) #define xhci_warn_ratelimited(xhci, fmt, args...) \ dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args) +/* + * Registers should always be accessed with double word or quad word accesses. + * + * Some xHCI implementations may support 64-bit address pointers. Registers + * with 64-bit address pointers should be written to with dword accesses by + * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second. + * xHCI implementations that do not support 64-bit address pointers will ignore + * the high dword, and write order is irrelevant. 
+ */
+static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
+ __le64 __iomem *regs)
+{
+ __u32 __iomem *ptr = (__u32 __iomem *) regs;
+ u64 val_lo = readl(ptr);
+ u64 val_hi = readl(ptr + 1);
+ return val_lo + (val_hi << 32);
+}
+static inline void xhci_write_64(struct xhci_hcd *xhci,
+ const u64 val, __le64 __iomem *regs)
+{
+ __u32 __iomem *ptr = (__u32 __iomem *) regs;
+ u32 val_lo = lower_32_bits(val);
+ u32 val_hi = upper_32_bits(val);
+
+ writel(val_lo, ptr);
+ writel(val_hi, ptr + 1);
+}
+
 static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
 {
 return xhci->quirks & XHCI_LINK_TRB_QUIRK;
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index e6f61e4361df..8afa813d690b 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -130,7 +130,7 @@ struct usb_phy *usb_get_phy(enum usb_phy_type type)
 phy = __usb_find_phy(&phy_list, type);
 if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
- pr_err("unable to find transceiver of type %s\n",
+ pr_debug("PHY: unable to find transceiver of type %s\n",
 usb_phy_type_string(type));
 goto err0;
 }
@@ -228,7 +228,7 @@ struct usb_phy *usb_get_phy_dev(struct device *dev, u8 index)
 phy = __usb_find_phy_dev(dev, &phy_bind_list, index);
 if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
- pr_err("unable to find transceiver\n");
+ dev_dbg(dev, "unable to find transceiver\n");
 goto err0;
 }
@@ -424,10 +424,8 @@ int usb_bind_phy(const char *dev_name, u8 index,
 unsigned long flags;
 phy_bind = kzalloc(sizeof(*phy_bind), GFP_KERNEL);
- if (!phy_bind) {
- pr_err("phy_bind(): No memory for phy_bind");
+ if (!phy_bind)
 return -ENOMEM;
- }
 phy_bind->dev_name = dev_name;
 phy_bind->phy_dev_name = phy_dev_name;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index ce0d7b0db012..ee1f00f03c43 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -152,6 +152,7 @@ static const struct usb_device_id id_table_combined[] = {
 { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
 { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
 { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_EV3CON_PID) },
 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) },
 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) },
@@ -191,6 +192,8 @@ static const struct usb_device_id id_table_combined[] = {
 { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
 { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
 { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
+ { USB_DEVICE(FTDI_VID, FTDI_TAGSYS_LP101_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_TAGSYS_P200X_PID) },
 { USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) },
 { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) },
 { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index a7019d1e3058..1e2d369df86e 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -50,6 +50,7 @@
 #define TI_XDS100V2_PID 0xa6d0
 #define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */
+#define FTDI_EV3CON_PID 0xABB9 /* Mindstorms EV3 Console Adapter */
 /* US Interface Navigator (http://www.usinterface.com/) */
 #define FTDI_USINT_CAT_PID 0xb810 /* Navigator CAT and 2nd PTT lines */
@@ -363,6 +364,12 @@
 /* Sprog II (Andrew Crosland's SprogII DCC interface) */
 #define FTDI_SPROG_II 0xF0C8
+/*
+ * Two of the Tagsys RFID Readers
+ */
+#define FTDI_TAGSYS_LP101_PID 0xF0E9 /* Tagsys L-P101 RFID*/
+#define FTDI_TAGSYS_P200X_PID
0xF0EE /* Tagsys Medio P200x RFID*/ + /* an infrared receiver for user access control with IR tags */ #define FTDI_PIEGROUP_PID 0xF208 /* Product Id */ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 5c86f57e4afa..216d20affba8 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -1362,7 +1362,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) }, diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index c65437cfd4a2..968a40201e5f 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -139,6 +139,9 @@ static const struct usb_device_id id_table[] = { {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 0)}, /* Sierra Wireless EM7700 Device Management */ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 2)}, /* Sierra Wireless EM7700 NMEA */ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 3)}, /* Sierra Wireless EM7700 Modem */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)}, /* Netgear AirCard 340U Device Management */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)}, /* Netgear AirCard 340U NMEA */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)}, /* Netgear AirCard 340U Modem */ { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index f112b079ddfc..fb79775447b0 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c @@ -71,7 +71,8 @@ DEVICE(hp4x, HP4X_IDS); /* Suunto ANT+ USB Driver */ #define SUUNTO_IDS() \ - { USB_DEVICE(0x0fcf, 0x1008) } + { USB_DEVICE(0x0fcf, 0x1008) }, \ + { USB_DEVICE(0x0fcf, 0x1009) } /* Dynastream ANT USB-m Stick */ DEVICE(suunto, SUUNTO_IDS); /* Siemens USB/MPI adapter */ diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig index 8470e1b114f2..1dd0604d1911 100644 --- a/drivers/usb/storage/Kconfig +++ b/drivers/usb/storage/Kconfig @@ -18,7 +18,9 @@ config USB_STORAGE This option depends on 'SCSI' support being enabled, but you probably also need 'SCSI device support: SCSI disk support' - (BLK_DEV_SD) for most USB storage devices. + (BLK_DEV_SD) for most USB storage devices. Some devices also + will require 'Probe all LUNs on each SCSI device' + (SCSI_MULTI_LUN). To compile this driver as a module, choose M here: the module will be called usb-storage. diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index 18509e6c21ab..9d38ddc8da49 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c @@ -78,6 +78,8 @@ static const char* host_info(struct Scsi_Host *host) static int slave_alloc (struct scsi_device *sdev) { + struct us_data *us = host_to_us(sdev->host); + /* * Set the INQUIRY transfer length to 36. 
We don't use any of * the extra data and many devices choke if asked for more or @@ -102,6 +104,10 @@ static int slave_alloc (struct scsi_device *sdev) */ blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1)); + /* Tell the SCSI layer if we know there is more than one LUN */ + if (us->protocol == USB_PR_BULK && us->max_lun > 0) + sdev->sdev_bflags |= BLIST_FORCELUN; + return 0; } diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h index 65a6a75066a8..82e8ed0324e3 100644 --- a/drivers/usb/storage/unusual_cypress.h +++ b/drivers/usb/storage/unusual_cypress.h @@ -31,7 +31,7 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999, "Cypress ISD-300LP", USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0), -UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x0219, +UNUSUAL_DEV( 0x14cd, 0x6116, 0x0160, 0x0160, "Super Top", "USB 2.0 SATA BRIDGE", USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0), diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index ad06255c2ade..adbeb255616a 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -1455,6 +1455,13 @@ UNUSUAL_DEV( 0x0f88, 0x042e, 0x0100, 0x0100, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_FIX_CAPACITY ), +/* Reported by Moritz Moeller-Herrmann <moritz-kernel@moeller-herrmann.de> */ +UNUSUAL_DEV( 0x0fca, 0x8004, 0x0201, 0x0201, + "Research In Motion", + "BlackBerry Bold 9000", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_MAX_SECTORS_64 ), + /* Reported by Michael Stattmann <michael@stattmann.com> */ UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000, "Sony Ericsson", diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 9a68409580d5..a0fa5de210cf 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -70,7 +70,12 @@ enum { }; struct vhost_net_ubuf_ref { - struct kref kref; + /* refcount follows semantics similar to kref: + * 0: object is released + * 1: no outstanding ubufs + * >1: outstanding ubufs + */ + atomic_t refcount; wait_queue_head_t wait; struct vhost_virtqueue *vq; }; @@ -116,14 +121,6 @@ static void vhost_net_enable_zcopy(int vq) vhost_net_zcopy_mask |= 0x1 << vq; } -static void vhost_net_zerocopy_done_signal(struct kref *kref) -{ - struct vhost_net_ubuf_ref *ubufs; - - ubufs = container_of(kref, struct vhost_net_ubuf_ref, kref); - wake_up(&ubufs->wait); -} - static struct vhost_net_ubuf_ref * vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy) { @@ -134,21 +131,24 @@ vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy) ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL); if (!ubufs) return ERR_PTR(-ENOMEM); - kref_init(&ubufs->kref); + atomic_set(&ubufs->refcount, 1); init_waitqueue_head(&ubufs->wait); ubufs->vq = vq; return ubufs; } -static void vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs) +static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs) { - kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal); + int r = atomic_sub_return(1, &ubufs->refcount); + if (unlikely(!r)) + wake_up(&ubufs->wait); + return r; } static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs) { - kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal); - wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount)); + vhost_net_ubuf_put(ubufs); + wait_event(ubufs->wait, !atomic_read(&ubufs->refcount)); } static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs) @@ -306,23 +306,26 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success) { struct vhost_net_ubuf_ref *ubufs = ubuf->ctx; struct 
vhost_virtqueue *vq = ubufs->vq; - int cnt = atomic_read(&ubufs->kref.refcount); + int cnt; + + rcu_read_lock_bh(); /* set len to mark this desc buffers done DMA */ vq->heads[ubuf->desc].len = success ? VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN; - vhost_net_ubuf_put(ubufs); + cnt = vhost_net_ubuf_put(ubufs); /* * Trigger polling thread if guest stopped submitting new buffers: - * in this case, the refcount after decrement will eventually reach 1 - * so here it is 2. + * in this case, the refcount after decrement will eventually reach 1. * We also trigger polling periodically after each 16 packets * (the value 16 here is more or less arbitrary, it's tuned to trigger * less than 10% of times). */ - if (cnt <= 2 || !(cnt % 16)) + if (cnt <= 1 || !(cnt % 16)) vhost_poll_queue(&vq->poll); + + rcu_read_unlock_bh(); } /* Expects to be always run from workqueue - which acts as @@ -420,7 +423,7 @@ static void handle_tx(struct vhost_net *net) msg.msg_control = ubuf; msg.msg_controllen = sizeof(ubuf); ubufs = nvq->ubufs; - kref_get(&ubufs->kref); + atomic_inc(&ubufs->refcount); nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV; } else { msg.msg_control = NULL; @@ -780,7 +783,7 @@ static void vhost_net_flush(struct vhost_net *n) vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs); mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); n->tx_flush = false; - kref_init(&n->vqs[VHOST_NET_VQ_TX].ubufs->kref); + atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1); mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); } } @@ -800,6 +803,8 @@ static int vhost_net_release(struct inode *inode, struct file *f) fput(tx_sock->file); if (rx_sock) fput(rx_sock->file); + /* Make sure no callbacks are outstanding */ + synchronize_rcu_bh(); /* We do an extra flush before freeing memory, * since jobs can re-queue themselves. 
*/ vhost_net_flush(n); diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 22262a3a0e2d..dade5b7699bc 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -364,7 +364,7 @@ config FB_SA1100 config FB_IMX tristate "Freescale i.MX1/21/25/27 LCD support" - depends on FB && IMX_HAVE_PLATFORM_IMX_FB + depends on FB && ARCH_MXC select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT diff --git a/drivers/video/exynos/Kconfig b/drivers/video/exynos/Kconfig index 1129d0e9e640..75c8a8e7efc0 100644 --- a/drivers/video/exynos/Kconfig +++ b/drivers/video/exynos/Kconfig @@ -22,7 +22,8 @@ config EXYNOS_MIPI_DSI config EXYNOS_LCD_S6E8AX0 bool "S6E8AX0 MIPI AMOLED LCD Driver" - depends on (EXYNOS_MIPI_DSI && BACKLIGHT_CLASS_DEVICE && LCD_CLASS_DEVICE) + depends on EXYNOS_MIPI_DSI && BACKLIGHT_CLASS_DEVICE + depends on (LCD_CLASS_DEVICE = y) default n help If you have an S6E8AX0 MIPI AMOLED LCD Panel, say Y to enable its diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c index bbeb8dd7f108..77d6221618f4 100644 --- a/drivers/video/omap2/dss/dispc.c +++ b/drivers/video/omap2/dss/dispc.c @@ -2160,8 +2160,8 @@ static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk, *five_taps = false; do { - in_height = DIV_ROUND_UP(height, *decim_y); - in_width = DIV_ROUND_UP(width, *decim_x); + in_height = height / *decim_y; + in_width = width / *decim_x; *core_clk = dispc.feat->calc_core_clk(pclk, in_width, in_height, out_width, out_height, mem_to_mem); error = (in_width > maxsinglelinewidth || !*core_clk || @@ -2199,8 +2199,8 @@ static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk, dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH); do { - in_height = DIV_ROUND_UP(height, *decim_y); - in_width = DIV_ROUND_UP(width, *decim_x); + in_height = height / *decim_y; + in_width = width / *decim_x; *five_taps = in_height > out_height; if (in_width > maxsinglelinewidth) @@ -2268,7 +2268,7 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk, { u16 in_width, in_width_max; int decim_x_min = *decim_x; - u16 in_height = DIV_ROUND_UP(height, *decim_y); + u16 in_height = height / *decim_y; const int maxsinglelinewidth = dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH); const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE); @@ -2287,7 +2287,7 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk, return -EINVAL; do { - in_width = DIV_ROUND_UP(width, *decim_x); + in_width = width / *decim_x; } while (*decim_x <= *x_predecim && in_width > maxsinglelinewidth && ++*decim_x); @@ -2466,8 +2466,8 @@ static int dispc_ovl_setup_common(enum omap_plane plane, if (r) return r; - in_width = DIV_ROUND_UP(in_width, x_predecim); - in_height = DIV_ROUND_UP(in_height, y_predecim); + in_width = in_width / x_predecim; + in_height = in_height / y_predecim; if (color_mode == OMAP_DSS_COLOR_YUV2 || color_mode == OMAP_DSS_COLOR_UYVY || diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c index 7411f2674e16..23ef21ffc2c4 100644 --- a/drivers/video/omap2/dss/dpi.c +++ b/drivers/video/omap2/dss/dpi.c @@ -117,7 +117,7 @@ struct dpi_clk_calc_ctx { /* outputs */ struct dsi_clock_info dsi_cinfo; - unsigned long long fck; + unsigned long fck; struct dispc_clock_info dispc_cinfo; }; diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c index efb9ee9e3c96..ba806c9e7f54 100644 --- a/drivers/video/omap2/dss/sdi.c +++ b/drivers/video/omap2/dss/sdi.c @@ -46,7 
+46,7 @@ static struct { struct sdi_clk_calc_ctx { unsigned long pck_min, pck_max; - unsigned long long fck; + unsigned long fck; struct dispc_clock_info dispc_cinfo; }; diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c index a06edbfa95ca..1b5d48c578e1 100644 --- a/drivers/vme/bridges/vme_ca91cx42.c +++ b/drivers/vme/bridges/vme_ca91cx42.c @@ -884,7 +884,7 @@ static ssize_t ca91cx42_master_read(struct vme_master_resource *image, if (done == count) goto out; } - if ((uintptr_t)addr & 0x2) { + if ((uintptr_t)(addr + done) & 0x2) { if ((count - done) < 2) { *(u8 *)(buf + done) = ioread8(addr + done); done += 1; @@ -938,7 +938,7 @@ static ssize_t ca91cx42_master_write(struct vme_master_resource *image, if (done == count) goto out; } - if ((uintptr_t)addr & 0x2) { + if ((uintptr_t)(addr + done) & 0x2) { if ((count - done) < 2) { iowrite8(*(u8 *)(buf + done), addr + done); done += 1; diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c index 16830d8b777c..9911cd5fddb5 100644 --- a/drivers/vme/bridges/vme_tsi148.c +++ b/drivers/vme/bridges/vme_tsi148.c @@ -1289,7 +1289,7 @@ static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf, if (done == count) goto out; } - if ((uintptr_t)addr & 0x2) { + if ((uintptr_t)(addr + done) & 0x2) { if ((count - done) < 2) { *(u8 *)(buf + done) = ioread8(addr + done); done += 1; @@ -1371,7 +1371,7 @@ static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf, if (done == count) goto out; } - if ((uintptr_t)addr & 0x2) { + if ((uintptr_t)(addr + done) & 0x2) { if ((count - done) < 2) { iowrite8(*(u8 *)(buf + done), addr + done); done += 1; diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index d75c811bfa56..45e00afa7f2d 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile @@ -16,7 +16,6 @@ xen-pad-$(CONFIG_X86) += xen-acpi-pad.o dom0-$(CONFIG_X86) += pcpu.o obj-$(CONFIG_XEN_DOM0) += $(dom0-y) obj-$(CONFIG_BLOCK) += biomerge.o -obj-$(CONFIG_XEN_XENCOMM) += xencomm.o obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o obj-$(CONFIG_XEN_SELFBALLOONING) += xen-selfballoon.o obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 4672e003c0ad..f4a9e3311297 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -862,6 +862,8 @@ int bind_evtchn_to_irq(unsigned int evtchn) irq = ret; goto out; } + /* New interdomain events are bound to VCPU 0. */ + bind_evtchn_to_cpu(evtchn, 0); } else { struct irq_info *info = info_for_irq(irq); WARN_ON(info == NULL || info->type != IRQT_EVTCHN); diff --git a/drivers/xen/xencomm.c b/drivers/xen/xencomm.c deleted file mode 100644 index 4793fc594549..000000000000 --- a/drivers/xen/xencomm.c +++ /dev/null @@ -1,219 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - * Copyright (C) IBM Corp. 2006 - * - * Authors: Hollis Blanchard <hollisb@us.ibm.com> - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/mm.h> -#include <linux/slab.h> -#include <asm/page.h> -#include <xen/xencomm.h> -#include <xen/interface/xen.h> -#include <asm/xen/xencomm.h> /* for xencomm_is_phys_contiguous() */ - -static int xencomm_init(struct xencomm_desc *desc, - void *buffer, unsigned long bytes) -{ - unsigned long recorded = 0; - int i = 0; - - while ((recorded < bytes) && (i < desc->nr_addrs)) { - unsigned long vaddr = (unsigned long)buffer + recorded; - unsigned long paddr; - int offset; - int chunksz; - - offset = vaddr % PAGE_SIZE; /* handle partial pages */ - chunksz = min(PAGE_SIZE - offset, bytes - recorded); - - paddr = xencomm_vtop(vaddr); - if (paddr == ~0UL) { - printk(KERN_DEBUG "%s: couldn't translate vaddr %lx\n", - __func__, vaddr); - return -EINVAL; - } - - desc->address[i++] = paddr; - recorded += chunksz; - } - - if (recorded < bytes) { - printk(KERN_DEBUG - "%s: could only translate %ld of %ld bytes\n", - __func__, recorded, bytes); - return -ENOSPC; - } - - /* mark remaining addresses invalid (just for safety) */ - while (i < desc->nr_addrs) - desc->address[i++] = XENCOMM_INVALID; - - desc->magic = XENCOMM_MAGIC; - - return 0; -} - -static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask, - void *buffer, unsigned long bytes) -{ - struct xencomm_desc *desc; - unsigned long buffer_ulong = (unsigned long)buffer; - unsigned long start = buffer_ulong & PAGE_MASK; - unsigned long end = (buffer_ulong + bytes) | ~PAGE_MASK; - unsigned long nr_addrs = (end - start + 1) >> PAGE_SHIFT; - unsigned long size = sizeof(*desc) + - sizeof(desc->address[0]) * nr_addrs; - - /* - * slab allocator returns at least sizeof(void*) aligned pointer. - * When sizeof(*desc) > sizeof(void*), struct xencomm_desc might - * cross page boundary. - */ - if (sizeof(*desc) > sizeof(void *)) { - unsigned long order = get_order(size); - desc = (struct xencomm_desc *)__get_free_pages(gfp_mask, - order); - if (desc == NULL) - return NULL; - - desc->nr_addrs = - ((PAGE_SIZE << order) - sizeof(struct xencomm_desc)) / - sizeof(*desc->address); - } else { - desc = kmalloc(size, gfp_mask); - if (desc == NULL) - return NULL; - - desc->nr_addrs = nr_addrs; - } - return desc; -} - -void xencomm_free(struct xencomm_handle *desc) -{ - if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG)) { - struct xencomm_desc *desc__ = (struct xencomm_desc *)desc; - if (sizeof(*desc__) > sizeof(void *)) { - unsigned long size = sizeof(*desc__) + - sizeof(desc__->address[0]) * desc__->nr_addrs; - unsigned long order = get_order(size); - free_pages((unsigned long)__va(desc), order); - } else - kfree(__va(desc)); - } -} - -static int xencomm_create(void *buffer, unsigned long bytes, - struct xencomm_desc **ret, gfp_t gfp_mask) -{ - struct xencomm_desc *desc; - int rc; - - pr_debug("%s: %p[%ld]\n", __func__, buffer, bytes); - - if (bytes == 0) { - /* don't create a descriptor; Xen recognizes NULL. 
*/ - BUG_ON(buffer != NULL); - *ret = NULL; - return 0; - } - - BUG_ON(buffer == NULL); /* 'bytes' is non-zero */ - - desc = xencomm_alloc(gfp_mask, buffer, bytes); - if (!desc) { - printk(KERN_DEBUG "%s failure\n", "xencomm_alloc"); - return -ENOMEM; - } - - rc = xencomm_init(desc, buffer, bytes); - if (rc) { - printk(KERN_DEBUG "%s failure: %d\n", "xencomm_init", rc); - xencomm_free((struct xencomm_handle *)__pa(desc)); - return rc; - } - - *ret = desc; - return 0; -} - -static struct xencomm_handle *xencomm_create_inline(void *ptr) -{ - unsigned long paddr; - - BUG_ON(!xencomm_is_phys_contiguous((unsigned long)ptr)); - - paddr = (unsigned long)xencomm_pa(ptr); - BUG_ON(paddr & XENCOMM_INLINE_FLAG); - return (struct xencomm_handle *)(paddr | XENCOMM_INLINE_FLAG); -} - -/* "mini" routine, for stack-based communications: */ -static int xencomm_create_mini(void *buffer, - unsigned long bytes, struct xencomm_mini *xc_desc, - struct xencomm_desc **ret) -{ - int rc = 0; - struct xencomm_desc *desc; - BUG_ON(((unsigned long)xc_desc) % sizeof(*xc_desc) != 0); - - desc = (void *)xc_desc; - - desc->nr_addrs = XENCOMM_MINI_ADDRS; - - rc = xencomm_init(desc, buffer, bytes); - if (!rc) - *ret = desc; - - return rc; -} - -struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes) -{ - int rc; - struct xencomm_desc *desc; - - if (xencomm_is_phys_contiguous((unsigned long)ptr)) - return xencomm_create_inline(ptr); - - rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL); - - if (rc || desc == NULL) - return NULL; - - return xencomm_pa(desc); -} - -struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes, - struct xencomm_mini *xc_desc) -{ - int rc; - struct xencomm_desc *desc = NULL; - - if (xencomm_is_phys_contiguous((unsigned long)ptr)) - return xencomm_create_inline(ptr); - - rc = xencomm_create_mini(ptr, bytes, xc_desc, - &desc); - - if (rc) - return NULL; - - return xencomm_pa(desc); -} diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c index 0bad24ddc2e7..0129b78a6908 100644 --- a/fs/bio-integrity.c +++ b/fs/bio-integrity.c @@ -114,6 +114,14 @@ void bio_integrity_free(struct bio *bio) } EXPORT_SYMBOL(bio_integrity_free); +static inline unsigned int bip_integrity_vecs(struct bio_integrity_payload *bip) +{ + if (bip->bip_slab == BIO_POOL_NONE) + return BIP_INLINE_VECS; + + return bvec_nr_vecs(bip->bip_slab); +} + /** * bio_integrity_add_page - Attach integrity metadata * @bio: bio to update @@ -129,7 +137,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page, struct bio_integrity_payload *bip = bio->bi_integrity; struct bio_vec *iv; - if (bip->bip_vcnt >= bvec_nr_vecs(bip->bip_slab)) { + if (bip->bip_vcnt >= bip_integrity_vecs(bip)) { printk(KERN_ERR "%s: bip_vec full\n", __func__); return 0; } @@ -226,7 +234,8 @@ unsigned int bio_integrity_tag_size(struct bio *bio) } EXPORT_SYMBOL(bio_integrity_tag_size); -int bio_integrity_tag(struct bio *bio, void *tag_buf, unsigned int len, int set) +static int bio_integrity_tag(struct bio *bio, void *tag_buf, unsigned int len, + int set) { struct bio_integrity_payload *bip = bio->bi_integrity; struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); @@ -611,7 +611,6 @@ EXPORT_SYMBOL(bio_clone_fast); struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask, struct bio_set *bs) { - unsigned nr_iovecs = 0; struct bvec_iter iter; struct bio_vec bv; struct bio *bio; @@ -638,10 +637,7 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask, * __bio_clone_fast() anyways. 
*/ - bio_for_each_segment(bv, bio_src, iter) - nr_iovecs++; - - bio = bio_alloc_bioset(gfp_mask, nr_iovecs, bs); + bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs); if (!bio) return NULL; @@ -650,9 +646,18 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask, bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; bio->bi_iter.bi_size = bio_src->bi_iter.bi_size; + if (bio->bi_rw & REQ_DISCARD) + goto integrity_clone; + + if (bio->bi_rw & REQ_WRITE_SAME) { + bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0]; + goto integrity_clone; + } + bio_for_each_segment(bv, bio_src, iter) bio->bi_io_vec[bio->bi_vcnt++] = bv; +integrity_clone: if (bio_integrity(bio_src)) { int ret; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 5215f04260b2..81ea55314b1f 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -3839,7 +3839,6 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, rb_erase(&ref->rb_node, &head->ref_root); atomic_dec(&delayed_refs->num_entries); btrfs_put_delayed_ref(ref); - cond_resched_lock(&head->lock); } if (head->must_insert_reserved) pin_bytes = true; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 184e9cb39647..d3d44486290b 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5154,7 +5154,7 @@ static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, return ERR_CAST(inode); } - return d_splice_alias(inode, dentry); + return d_materialise_unique(dentry, inode); } unsigned char btrfs_filetype_table[] = { diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 383ab455bfa7..a6d8efa46bfe 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -3537,20 +3537,6 @@ out: return ret; } -static long btrfs_ioctl_global_rsv(struct btrfs_root *root, void __user *arg) -{ - struct btrfs_block_rsv *block_rsv = &root->fs_info->global_block_rsv; - u64 reserved; - - spin_lock(&block_rsv->lock); - reserved = block_rsv->reserved; - spin_unlock(&block_rsv->lock); - - if (arg && copy_to_user(arg, &reserved, sizeof(reserved))) - return -EFAULT; - return 0; -} - /* * there are many ways the trans_start and trans_end ioctls can lead * to deadlocks. They should only be used by applications that @@ -4757,8 +4743,6 @@ long btrfs_ioctl(struct file *file, unsigned int return btrfs_ioctl_logical_to_ino(root, argp); case BTRFS_IOC_SPACE_INFO: return btrfs_ioctl_space_info(root, argp); - case BTRFS_IOC_GLOBAL_RSV: - return btrfs_ioctl_global_rsv(root, argp); case BTRFS_IOC_SYNC: { int ret; diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 9c8d1a3fdc3a..9dde9717c1b9 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -1332,6 +1332,16 @@ verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, " } if (cur_clone_root) { + if (compressed != BTRFS_COMPRESS_NONE) { + /* + * Offsets given by iterate_extent_inodes() are relative + * to the start of the extent, we need to add logical + * offset from the file extent item. 
+ * (See why at backref.c:check_extent_in_eb()) + */ + cur_clone_root->offset += btrfs_file_extent_offset(eb, + fi); + } *found = cur_clone_root; ret = 0; } else { diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 97cc24198554..d04db817be5c 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -566,7 +566,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) kfree(num); if (info->max_inline) { - info->max_inline = max_t(u64, + info->max_inline = min_t(u64, info->max_inline, root->sectorsize); } @@ -855,6 +855,7 @@ static struct dentry *get_default_root(struct super_block *sb, struct btrfs_path *path; struct btrfs_key location; struct inode *inode; + struct dentry *dentry; u64 dir_id; int new = 0; @@ -925,7 +926,13 @@ setup_root: return dget(sb->s_root); } - return d_obtain_alias(inode); + dentry = d_obtain_alias(inode); + if (!IS_ERR(dentry)) { + spin_lock(&dentry->d_lock); + dentry->d_flags &= ~DCACHE_DISCONNECTED; + spin_unlock(&dentry->d_lock); + } + return dentry; } static int btrfs_fill_super(struct super_block *sb, diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 782374d8fd19..865f4cf9a769 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -578,8 +578,14 @@ static int add_device_membership(struct btrfs_fs_info *fs_info) return -ENOMEM; list_for_each_entry(dev, &fs_devices->devices, dev_list) { - struct hd_struct *disk = dev->bdev->bd_part; - struct kobject *disk_kobj = &part_to_dev(disk)->kobj; + struct hd_struct *disk; + struct kobject *disk_kobj; + + if (!dev->bdev) + continue; + + disk = dev->bdev->bd_part; + disk_kobj = &part_to_dev(disk)->kobj; error = sysfs_create_link(fs_info->device_dir_kobj, disk_kobj, disk_kobj->name); diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c index 4c2d452c4bfc..21887d63dad5 100644 --- a/fs/ceph/acl.c +++ b/fs/ceph/acl.c @@ -54,11 +54,6 @@ static inline struct posix_acl *ceph_get_cached_acl(struct inode *inode, return acl; } -void ceph_forget_all_cached_acls(struct inode *inode) -{ - forget_all_cached_acls(inode); -} - struct posix_acl *ceph_get_acl(struct inode *inode, int type) { int size; @@ -160,11 +155,7 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type) goto out_dput; } - if (value) - ret = __ceph_setxattr(dentry, name, value, size, 0); - else - ret = __ceph_removexattr(dentry, name); - + ret = __ceph_setxattr(dentry, name, value, size, 0); if (ret) { if (new_mode != old_mode) { newattrs.ia_mode = old_mode; diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 6da4df84ba30..45eda6d7a40c 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -100,6 +100,14 @@ static unsigned fpos_off(loff_t p) return p & 0xffffffff; } +static int fpos_cmp(loff_t l, loff_t r) +{ + int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r)); + if (v) + return v; + return (int)(fpos_off(l) - fpos_off(r)); +} + /* * When possible, we try to satisfy a readdir by peeking at the * dcache. 
We make this work by carefully ordering dentries on @@ -156,7 +164,7 @@ more: if (!d_unhashed(dentry) && dentry->d_inode && ceph_snap(dentry->d_inode) != CEPH_SNAPDIR && ceph_ino(dentry->d_inode) != CEPH_INO_CEPH && - ctx->pos <= di->offset) + fpos_cmp(ctx->pos, di->offset) <= 0) break; dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry, dentry->d_name.len, dentry->d_name.name, di->offset, @@ -695,9 +703,8 @@ static int ceph_mknod(struct inode *dir, struct dentry *dentry, ceph_mdsc_put_request(req); if (!err) - err = ceph_init_acl(dentry, dentry->d_inode, dir); - - if (err) + ceph_init_acl(dentry, dentry->d_inode, dir); + else d_drop(dentry); return err; } @@ -735,7 +742,9 @@ static int ceph_symlink(struct inode *dir, struct dentry *dentry, if (!err && !req->r_reply_info.head->is_dentry) err = ceph_handle_notrace_create(dir, dentry); ceph_mdsc_put_request(req); - if (err) + if (!err) + ceph_init_acl(dentry, dentry->d_inode, dir); + else d_drop(dentry); return err; } @@ -776,7 +785,9 @@ static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) err = ceph_handle_notrace_create(dir, dentry); ceph_mdsc_put_request(req); out: - if (err < 0) + if (!err) + ceph_init_acl(dentry, dentry->d_inode, dir); + else d_drop(dentry); return err; } diff --git a/fs/ceph/file.c b/fs/ceph/file.c index dfd2ce3419f8..09c7afe32e49 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -286,6 +286,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry, } else { dout("atomic_open finish_open on dn %p\n", dn); if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) { + ceph_init_acl(dentry, dentry->d_inode, dir); *opened |= FILE_CREATED; } err = finish_open(file, dentry, ceph_open, opened); diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 2df963f1cf5a..10a4ccbf38da 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -144,7 +144,11 @@ enum { Opt_ino32, Opt_noino32, Opt_fscache, - Opt_nofscache + Opt_nofscache, +#ifdef CONFIG_CEPH_FS_POSIX_ACL + Opt_acl, +#endif + Opt_noacl }; static match_table_t fsopt_tokens = { @@ -172,6 +176,10 @@ static match_table_t fsopt_tokens = { {Opt_noino32, "noino32"}, {Opt_fscache, "fsc"}, {Opt_nofscache, "nofsc"}, +#ifdef CONFIG_CEPH_FS_POSIX_ACL + {Opt_acl, "acl"}, +#endif + {Opt_noacl, "noacl"}, {-1, NULL} }; @@ -271,6 +279,14 @@ static int parse_fsopt_token(char *c, void *private) case Opt_nofscache: fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE; break; +#ifdef CONFIG_CEPH_FS_POSIX_ACL + case Opt_acl: + fsopt->sb_flags |= MS_POSIXACL; + break; +#endif + case Opt_noacl: + fsopt->sb_flags &= ~MS_POSIXACL; + break; default: BUG_ON(token); } @@ -438,6 +454,13 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root) else seq_puts(m, ",nofsc"); +#ifdef CONFIG_CEPH_FS_POSIX_ACL + if (fsopt->sb_flags & MS_POSIXACL) + seq_puts(m, ",acl"); + else + seq_puts(m, ",noacl"); +#endif + if (fsopt->wsize) seq_printf(m, ",wsize=%d", fsopt->wsize); if (fsopt->rsize != CEPH_RSIZE_DEFAULT) @@ -819,9 +842,6 @@ static int ceph_set_super(struct super_block *s, void *data) s->s_flags = fsc->mount_options->sb_flags; s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */ -#ifdef CONFIG_CEPH_FS_POSIX_ACL - s->s_flags |= MS_POSIXACL; -#endif s->s_xattr = ceph_xattr_handlers; s->s_fs_info = fsc; @@ -911,6 +931,10 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type, struct ceph_options *opt = NULL; dout("ceph_mount\n"); + +#ifdef CONFIG_CEPH_FS_POSIX_ACL + flags |= MS_POSIXACL; +#endif err = parse_mount_options(&fsopt, &opt, 
flags, data, dev_name, &path); if (err < 0) { res = ERR_PTR(err); diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 19793b56d0a7..d8801a95b685 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -13,6 +13,7 @@ #include <linux/wait.h> #include <linux/writeback.h> #include <linux/slab.h> +#include <linux/posix_acl.h> #include <linux/ceph/libceph.h> @@ -743,7 +744,11 @@ extern const struct xattr_handler *ceph_xattr_handlers[]; struct posix_acl *ceph_get_acl(struct inode *, int); int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type); int ceph_init_acl(struct dentry *, struct inode *, struct inode *); -void ceph_forget_all_cached_acls(struct inode *inode); + +static inline void ceph_forget_all_cached_acls(struct inode *inode) +{ + forget_all_cached_acls(inode); +} #else diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c index 898b6565ad3e..a55ec37378c6 100644 --- a/fs/ceph/xattr.c +++ b/fs/ceph/xattr.c @@ -12,6 +12,9 @@ #define XATTR_CEPH_PREFIX "ceph." #define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1) +static int __remove_xattr(struct ceph_inode_info *ci, + struct ceph_inode_xattr *xattr); + /* * List of handlers for synthetic system.* attributes. Other * attributes are handled directly. @@ -319,8 +322,7 @@ static struct ceph_vxattr *ceph_match_vxattr(struct inode *inode, static int __set_xattr(struct ceph_inode_info *ci, const char *name, int name_len, const char *val, int val_len, - int dirty, - int should_free_name, int should_free_val, + int flags, int update_xattr, struct ceph_inode_xattr **newxattr) { struct rb_node **p; @@ -349,12 +351,31 @@ static int __set_xattr(struct ceph_inode_info *ci, xattr = NULL; } + if (update_xattr) { + int err = 0; + if (xattr && (flags & XATTR_CREATE)) + err = -EEXIST; + else if (!xattr && (flags & XATTR_REPLACE)) + err = -ENODATA; + if (err) { + kfree(name); + kfree(val); + return err; + } + if (update_xattr < 0) { + if (xattr) + __remove_xattr(ci, xattr); + kfree(name); + return 0; + } + } + if (!xattr) { new = 1; xattr = *newxattr; xattr->name = name; xattr->name_len = name_len; - xattr->should_free_name = should_free_name; + xattr->should_free_name = update_xattr; ci->i_xattrs.count++; dout("__set_xattr count=%d\n", ci->i_xattrs.count); @@ -364,7 +385,7 @@ static int __set_xattr(struct ceph_inode_info *ci, if (xattr->should_free_val) kfree((void *)xattr->val); - if (should_free_name) { + if (update_xattr) { kfree((void *)name); name = xattr->name; } @@ -379,8 +400,8 @@ static int __set_xattr(struct ceph_inode_info *ci, xattr->val = ""; xattr->val_len = val_len; - xattr->dirty = dirty; - xattr->should_free_val = (val && should_free_val); + xattr->dirty = update_xattr; + xattr->should_free_val = (val && update_xattr); if (new) { rb_link_node(&xattr->node, parent, p); @@ -442,7 +463,7 @@ static int __remove_xattr(struct ceph_inode_info *ci, struct ceph_inode_xattr *xattr) { if (!xattr) - return -EOPNOTSUPP; + return -ENODATA; rb_erase(&xattr->node, &ci->i_xattrs.index); @@ -588,7 +609,7 @@ start: p += len; err = __set_xattr(ci, name, namelen, val, len, - 0, 0, 0, &xattrs[numattr]); + 0, 0, &xattrs[numattr]); if (err < 0) goto bad; @@ -850,6 +871,9 @@ static int ceph_sync_setxattr(struct dentry *dentry, const char *name, dout("setxattr value=%.*s\n", (int)size, value); + if (!value) + flags |= CEPH_XATTR_REMOVE; + /* do request */ req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR, USE_AUTH_MDS); @@ -892,7 +916,7 @@ int __ceph_setxattr(struct dentry *dentry, const char *name, struct ceph_inode_info *ci = 
ceph_inode(inode); int issued; int err; - int dirty; + int dirty = 0; int name_len = strlen(name); int val_len = size; char *newname = NULL; @@ -953,12 +977,14 @@ retry: goto retry; } - err = __set_xattr(ci, newname, name_len, newval, - val_len, 1, 1, 1, &xattr); + err = __set_xattr(ci, newname, name_len, newval, val_len, + flags, value ? 1 : -1, &xattr); - dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL); - ci->i_xattrs.dirty = true; - inode->i_ctime = CURRENT_TIME; + if (!err) { + dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL); + ci->i_xattrs.dirty = true; + inode->i_ctime = CURRENT_TIME; + } spin_unlock(&ci->i_ceph_lock); if (dirty) diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c index c819b0bd491a..7ff866dbb89e 100644 --- a/fs/cifs/cifsacl.c +++ b/fs/cifs/cifsacl.c @@ -865,8 +865,8 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd, return rc; } -static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, - __u16 fid, u32 *pacllen) +struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, + const struct cifs_fid *cifsfid, u32 *pacllen) { struct cifs_ntsd *pntsd = NULL; unsigned int xid; @@ -877,7 +877,8 @@ static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, return ERR_CAST(tlink); xid = get_xid(); - rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), fid, &pntsd, pacllen); + rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), cifsfid->netfid, &pntsd, + pacllen); free_xid(xid); cifs_put_tlink(tlink); @@ -946,7 +947,7 @@ struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb, if (!open_file) return get_cifs_acl_by_path(cifs_sb, path, pacllen); - pntsd = get_cifs_acl_by_fid(cifs_sb, open_file->fid.netfid, pacllen); + pntsd = get_cifs_acl_by_fid(cifs_sb, &open_file->fid, pacllen); cifsFileInfo_put(open_file); return pntsd; } @@ -1006,19 +1007,31 @@ out: /* Translate the CIFS ACL (simlar to NTFS ACL) for a file into mode bits */ int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr, - struct inode *inode, const char *path, const __u16 *pfid) + struct inode *inode, const char *path, + const struct cifs_fid *pfid) { struct cifs_ntsd *pntsd = NULL; u32 acllen = 0; int rc = 0; + struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); + struct cifs_tcon *tcon; cifs_dbg(NOISY, "converting ACL to mode for %s\n", path); - if (pfid) - pntsd = get_cifs_acl_by_fid(cifs_sb, *pfid, &acllen); - else - pntsd = get_cifs_acl(cifs_sb, inode, path, &acllen); + if (IS_ERR(tlink)) + return PTR_ERR(tlink); + tcon = tlink_tcon(tlink); + if (pfid && (tcon->ses->server->ops->get_acl_by_fid)) + pntsd = tcon->ses->server->ops->get_acl_by_fid(cifs_sb, pfid, + &acllen); + else if (tcon->ses->server->ops->get_acl) + pntsd = tcon->ses->server->ops->get_acl(cifs_sb, inode, path, + &acllen); + else { + cifs_put_tlink(tlink); + return -EOPNOTSUPP; + } /* if we can retrieve the ACL, now parse Access Control Entries, ACEs */ if (IS_ERR(pntsd)) { rc = PTR_ERR(pntsd); @@ -1030,6 +1043,8 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr, cifs_dbg(VFS, "parse sec desc failed rc = %d\n", rc); } + cifs_put_tlink(tlink); + return rc; } diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 86dc28c7aa5c..cf32f0393369 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -398,6 +398,8 @@ struct smb_version_operations { const struct nls_table *, int); struct cifs_ntsd * (*get_acl)(struct cifs_sb_info *, struct inode *, const char *, u32 *); + struct cifs_ntsd * (*get_acl_by_fid)(struct 
cifs_sb_info *, + const struct cifs_fid *, u32 *); int (*set_acl)(struct cifs_ntsd *, __u32, struct inode *, const char *, int); }; diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index d00e09dfc452..acc4ee8ed075 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -151,7 +151,7 @@ extern struct inode *cifs_iget(struct super_block *sb, extern int cifs_get_inode_info(struct inode **inode, const char *full_path, FILE_ALL_INFO *data, struct super_block *sb, - int xid, const __u16 *fid); + int xid, const struct cifs_fid *fid); extern int cifs_get_inode_info_unix(struct inode **pinode, const unsigned char *search_path, struct super_block *sb, unsigned int xid); @@ -162,11 +162,13 @@ extern int cifs_rename_pending_delete(const char *full_path, const unsigned int xid); extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr, struct inode *inode, - const char *path, const __u16 *pfid); + const char *path, const struct cifs_fid *pfid); extern int id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64, kuid_t, kgid_t); extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *, const char *, u32 *); +extern struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *, + const struct cifs_fid *, u32 *); extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *, const char *, int); diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index d3a6796caa5a..3db0c5fd9a11 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -378,7 +378,7 @@ cifs_create_get_file_info: xid); else { rc = cifs_get_inode_info(&newinode, full_path, buf, inode->i_sb, - xid, &fid->netfid); + xid, fid); if (newinode) { if (server->ops->set_lease_key) server->ops->set_lease_key(newinode, fid); diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 755584684f6c..53c15074bb36 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -244,7 +244,7 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb, xid); else rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb, - xid, &fid->netfid); + xid, fid); out: kfree(buf); @@ -2389,7 +2389,7 @@ cifs_iovec_write(struct file *file, const struct iovec *iov, unsigned long nr_segs, loff_t *poffset) { unsigned long nr_pages, i; - size_t copied, len, cur_len; + size_t bytes, copied, len, cur_len; ssize_t total_written = 0; loff_t offset; struct iov_iter it; @@ -2444,14 +2444,45 @@ cifs_iovec_write(struct file *file, const struct iovec *iov, save_len = cur_len; for (i = 0; i < nr_pages; i++) { - copied = min_t(const size_t, cur_len, PAGE_SIZE); + bytes = min_t(const size_t, cur_len, PAGE_SIZE); copied = iov_iter_copy_from_user(wdata->pages[i], &it, - 0, copied); + 0, bytes); cur_len -= copied; iov_iter_advance(&it, copied); + /* + * If we didn't copy as much as we expected, then that + * may mean we trod into an unmapped area. Stop copying + * at that point. On the next pass through the big + * loop, we'll likely end up getting a zero-length + * write and bailing out of it. + */ + if (copied < bytes) + break; } cur_len = save_len - cur_len; + /* + * If we have no data to send, then that probably means that + * the copy above failed altogether. That's most likely because + * the address in the iovec was bogus. Set the rc to -EFAULT, + * free anything we allocated and bail out. + */ + if (!cur_len) { + for (i = 0; i < nr_pages; i++) + put_page(wdata->pages[i]); + kfree(wdata); + rc = -EFAULT; + break; + } + + /* + * i + 1 now represents the number of pages we actually used in + * the copy phase above. 
Bring nr_pages down to that, and free + * any pages that we didn't use. + */ + for ( ; nr_pages > i + 1; nr_pages--) + put_page(wdata->pages[nr_pages - 1]); + wdata->sync_mode = WB_SYNC_ALL; wdata->nr_pages = nr_pages; wdata->offset = (__u64)offset; diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index be58b8fcdb3c..aadc2b68678b 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -677,7 +677,7 @@ cgfi_exit: int cifs_get_inode_info(struct inode **inode, const char *full_path, FILE_ALL_INFO *data, struct super_block *sb, int xid, - const __u16 *fid) + const struct cifs_fid *fid) { bool validinum = false; __u16 srchflgs; diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index bfd66d84831e..526fb89f9230 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -1073,6 +1073,7 @@ struct smb_version_operations smb1_operations = { #endif /* CIFS_XATTR */ #ifdef CONFIG_CIFS_ACL .get_acl = get_cifs_acl, + .get_acl_by_fid = get_cifs_acl_by_fid, .set_acl = set_cifs_acl, #endif /* CIFS_ACL */ }; diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h index c38350851b08..bc0bb9c34f72 100644 --- a/fs/cifs/smb2glob.h +++ b/fs/cifs/smb2glob.h @@ -57,4 +57,7 @@ #define SMB2_CMACAES_SIZE (16) #define SMB3_SIGNKEY_SIZE (16) +/* Maximum buffer size value we can send with 1 credit */ +#define SMB2_MAX_BUFFER_SIZE 65536 + #endif /* _SMB2_GLOB_H */ diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 757da3e54d3d..192f51a12cf1 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -182,11 +182,8 @@ smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info) /* start with specified wsize, or default */ wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE; wsize = min_t(unsigned int, wsize, server->max_write); - /* - * limit write size to 2 ** 16, because we don't support multicredit - * requests now. - */ - wsize = min_t(unsigned int, wsize, 2 << 15); + /* set it to the maximum buffer size value we can send with 1 credit */ + wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE); return wsize; } @@ -200,11 +197,8 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info) /* start with specified rsize, or default */ rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE; rsize = min_t(unsigned int, rsize, server->max_read); - /* - * limit write size to 2 ** 16, because we don't support multicredit - * requests now. - */ - rsize = min_t(unsigned int, rsize, 2 << 15); + /* set it to the maximum buffer size value we can send with 1 credit */ + rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE); return rsize; } diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index a3f7a9c3cc69..860344701067 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -413,7 +413,9 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) /* SMB2 only has an extended negflavor */ server->negflavor = CIFS_NEGFLAVOR_EXTENDED; - server->maxBuf = le32_to_cpu(rsp->MaxTransactSize); + /* set it to the maximum buffer size value we can send with 1 credit */ + server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize), + SMB2_MAX_BUFFER_SIZE); server->max_read = le32_to_cpu(rsp->MaxReadSize); server->max_write = le32_to_cpu(rsp->MaxWriteSize); /* BB Do we need to validate the SecurityMode? 
*/ diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index ece55565b9cd..d3a534fdc5ff 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -771,6 +771,8 @@ do { \ if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \ (einode)->xtime.tv_sec = \ (signed)le32_to_cpu((raw_inode)->xtime); \ + else \ + (einode)->xtime.tv_sec = 0; \ if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \ ext4_decode_extra_time(&(einode)->xtime, \ raw_inode->xtime ## _extra); \ diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 10cff4736b11..74bc2d549c58 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -3906,6 +3906,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, } else err = ret; map->m_flags |= EXT4_MAP_MAPPED; + map->m_pblk = newblock; if (allocated > map->m_len) allocated = map->m_len; map->m_len = allocated; diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 6bea80614d77..a2a837f00407 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -140,7 +140,7 @@ static long swap_inode_boot_loader(struct super_block *sb, handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2); if (IS_ERR(handle)) { err = -EINVAL; - goto swap_boot_out; + goto journal_err_out; } /* Protect extent tree against block allocations via delalloc */ @@ -198,6 +198,7 @@ static long swap_inode_boot_loader(struct super_block *sb, ext4_double_up_write_data_sem(inode, inode_bl); +journal_err_out: ext4_inode_resume_unlocked_dio(inode); ext4_inode_resume_unlocked_dio(inode_bl); diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index c5adbb318a90..f3b84cd9de56 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -243,6 +243,7 @@ static int ext4_alloc_group_tables(struct super_block *sb, ext4_group_t group; ext4_group_t last_group; unsigned overhead; + __u16 uninit_mask = (flexbg_size > 1) ? 
~EXT4_BG_BLOCK_UNINIT : ~0; BUG_ON(flex_gd->count == 0 || group_data == NULL); @@ -266,7 +267,7 @@ next_group: src_group++; for (; src_group <= last_group; src_group++) { overhead = ext4_group_overhead_blocks(sb, src_group); - if (overhead != 0) + if (overhead == 0) last_blk += group_data[src_group - group].blocks_count; else break; @@ -280,8 +281,7 @@ next_group: group = ext4_get_group_number(sb, start_blk - 1); group -= group_data[0].group; group_data[group].free_blocks_count--; - if (flexbg_size > 1) - flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT; + flex_gd->bg_flags[group] &= uninit_mask; } /* Allocate inode bitmaps */ @@ -292,22 +292,30 @@ next_group: group = ext4_get_group_number(sb, start_blk - 1); group -= group_data[0].group; group_data[group].free_blocks_count--; - if (flexbg_size > 1) - flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT; + flex_gd->bg_flags[group] &= uninit_mask; } /* Allocate inode tables */ for (; it_index < flex_gd->count; it_index++) { - if (start_blk + EXT4_SB(sb)->s_itb_per_group > last_blk) + unsigned int itb = EXT4_SB(sb)->s_itb_per_group; + ext4_fsblk_t next_group_start; + + if (start_blk + itb > last_blk) goto next_group; group_data[it_index].inode_table = start_blk; - group = ext4_get_group_number(sb, start_blk - 1); + group = ext4_get_group_number(sb, start_blk); + next_group_start = ext4_group_first_block_no(sb, group + 1); group -= group_data[0].group; - group_data[group].free_blocks_count -= - EXT4_SB(sb)->s_itb_per_group; - if (flexbg_size > 1) - flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT; + if (start_blk + itb > next_group_start) { + flex_gd->bg_flags[group + 1] &= uninit_mask; + overhead = start_blk + itb - next_group_start; + group_data[group + 1].free_blocks_count -= overhead; + itb -= overhead; + } + + group_data[group].free_blocks_count -= itb; + flex_gd->bg_flags[group] &= uninit_mask; start_blk += EXT4_SB(sb)->s_itb_per_group; } @@ -401,7 +409,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle, start = ext4_group_first_block_no(sb, group); group -= flex_gd->groups[0].group; - count2 = sb->s_blocksize * 8 - (block - start); + count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start); if (count2 > count) count2 = count; @@ -620,7 +628,7 @@ handle_ib: if (err) goto out; count = group_table_count[j]; - start = group_data[i].block_bitmap; + start = (&group_data[i].block_bitmap)[j]; block = start; } diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 1f7784de05b6..710fed2377d4 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -3695,16 +3695,22 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) for (i = 0; i < 4; i++) sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]); sbi->s_def_hash_version = es->s_def_hash_version; - i = le32_to_cpu(es->s_flags); - if (i & EXT2_FLAGS_UNSIGNED_HASH) - sbi->s_hash_unsigned = 3; - else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) { + if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) { + i = le32_to_cpu(es->s_flags); + if (i & EXT2_FLAGS_UNSIGNED_HASH) + sbi->s_hash_unsigned = 3; + else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) { #ifdef __CHAR_UNSIGNED__ - es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH); - sbi->s_hash_unsigned = 3; + if (!(sb->s_flags & MS_RDONLY)) + es->s_flags |= + cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH); + sbi->s_hash_unsigned = 3; #else - es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH); + if (!(sb->s_flags & MS_RDONLY)) + es->s_flags |= + cpu_to_le32(EXT2_FLAGS_SIGNED_HASH); #endif + } } /* Handle clustersize */ 
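The ext4_alloc_group_tables() hunk in fs/ext4/resize.c above replaces the repeated "if (flexbg_size > 1)" tests with a single precomputed bit mask that is unconditionally ANDed into each group's flags. A minimal standalone C sketch of that pattern follows; it is an editorial illustration, not part of the patch, and the names BG_BLOCK_UNINIT and mark_group_used are stand-ins rather than the kernel's own symbols.

#include <stdint.h>

#define BG_BLOCK_UNINIT 0x0001	/* stand-in for EXT4_BG_BLOCK_UNINIT */

/*
 * Compute the mask once per call: with flex groups (flexbg_size > 1) the
 * UNINIT bit must be cleared for every group that receives a bitmap or an
 * inode table; otherwise the mask is all ones and the AND below is a no-op.
 */
static void mark_group_used(uint16_t *bg_flags, unsigned int flexbg_size)
{
	uint16_t uninit_mask = (flexbg_size > 1) ? (uint16_t)~BG_BLOCK_UNINIT
						 : (uint16_t)~0;

	*bg_flags &= uninit_mask;
}

Folding the condition into the mask keeps the three allocation loops (block bitmaps, inode bitmaps, inode tables) identical in shape, and it also makes it straightforward to apply the same clearing to the following group when an inode table spills past the group boundary, as the patch does with bg_flags[group + 1].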
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c index e1959efad64f..b5ebc2d7d80d 100644 --- a/fs/fscache/object-list.c +++ b/fs/fscache/object-list.c @@ -50,6 +50,8 @@ void fscache_objlist_add(struct fscache_object *obj) struct fscache_object *xobj; struct rb_node **p = &fscache_object_list.rb_node, *parent = NULL; + ASSERT(RB_EMPTY_NODE(&obj->objlist_link)); + write_lock(&fscache_object_list_lock); while (*p) { @@ -75,6 +77,9 @@ void fscache_objlist_add(struct fscache_object *obj) */ void fscache_objlist_remove(struct fscache_object *obj) { + if (RB_EMPTY_NODE(&obj->objlist_link)) + return; + write_lock(&fscache_object_list_lock); BUG_ON(RB_EMPTY_ROOT(&fscache_object_list)); diff --git a/fs/fscache/object.c b/fs/fscache/object.c index 53d35c504240..d3b4539f1651 100644 --- a/fs/fscache/object.c +++ b/fs/fscache/object.c @@ -314,6 +314,9 @@ void fscache_object_init(struct fscache_object *object, object->cache = cache; object->cookie = cookie; object->parent = NULL; +#ifdef CONFIG_FSCACHE_OBJECT_LIST + RB_CLEAR_NODE(&object->objlist_link); +#endif object->oob_event_mask = 0; for (t = object->oob_table; t->events; t++) diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 8360674c85bc..60bb365f54a5 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -514,11 +514,13 @@ int jbd2_journal_start_reserved(handle_t *handle, unsigned int type, * similarly constrained call sites */ ret = start_this_handle(journal, handle, GFP_NOFS); - if (ret < 0) + if (ret < 0) { jbd2_journal_free_reserved(handle); + return ret; + } handle->h_type = type; handle->h_line_no = line_no; - return ret; + return 0; } EXPORT_SYMBOL(jbd2_journal_start_reserved); diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c index e973b85d6afd..5a8ea16eedbc 100644 --- a/fs/jfs/acl.c +++ b/fs/jfs/acl.c @@ -86,6 +86,8 @@ static int __jfs_set_acl(tid_t tid, struct inode *inode, int type, rc = posix_acl_equiv_mode(acl, &inode->i_mode); if (rc < 0) return rc; + inode->i_ctime = CURRENT_TIME; + mark_inode_dirty(inode); if (rc == 0) acl = NULL; break; diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c index e066a3902973..ab798a88ec1d 100644 --- a/fs/lockd/svclock.c +++ b/fs/lockd/svclock.c @@ -779,6 +779,7 @@ nlmsvc_grant_blocked(struct nlm_block *block) struct nlm_file *file = block->b_file; struct nlm_lock *lock = &block->b_call->a_args.lock; int error; + loff_t fl_start, fl_end; dprintk("lockd: grant blocked lock %p\n", block); @@ -796,9 +797,16 @@ nlmsvc_grant_blocked(struct nlm_block *block) } /* Try the lock operation again */ + /* vfs_lock_file() can mangle fl_start and fl_end, but we need + * them unchanged for the GRANT_MSG + */ lock->fl.fl_flags |= FL_SLEEP; + fl_start = lock->fl.fl_start; + fl_end = lock->fl.fl_end; error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL); lock->fl.fl_flags &= ~FL_SLEEP; + lock->fl.fl_start = fl_start; + lock->fl.fl_end = fl_end; switch (error) { case 0: diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c index d3a587144222..d190e33d0ec2 100644 --- a/fs/nfsd/nfs4acl.c +++ b/fs/nfsd/nfs4acl.c @@ -151,17 +151,15 @@ nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry, pacl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL); if (IS_ERR(pacl)) return PTR_ERR(pacl); - /* allocate for worst case: one (deny, allow) pair each: */ - size += 2 * pacl->a_count; } + /* allocate for worst case: one (deny, allow) pair each: */ + size += 2 * pacl->a_count; if (S_ISDIR(inode->i_mode)) { flags = NFS4_ACL_DIR; dpacl = get_acl(inode, ACL_TYPE_DEFAULT); if (dpacl) size += 
2 * dpacl->a_count; - } else { - dpacl = NULL; } *acl = nfs4_acl_new(size); @@ -170,8 +168,7 @@ nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry, goto out; } - if (pacl) - _posix_to_nfsv4_one(pacl, *acl, flags & ~NFS4_ACL_TYPE_DEFAULT); + _posix_to_nfsv4_one(pacl, *acl, flags & ~NFS4_ACL_TYPE_DEFAULT); if (dpacl) _posix_to_nfsv4_one(dpacl, *acl, flags | NFS4_ACL_TYPE_DEFAULT); diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c index 2b7882b508db..9a3c68cf6026 100644 --- a/fs/reiserfs/do_balan.c +++ b/fs/reiserfs/do_balan.c @@ -324,23 +324,17 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h switch (flag) { case M_INSERT: /* insert item into L[0] */ - if (item_pos == tb->lnum[0] - 1 - && tb->lbytes != -1) { + if (item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) { /* part of new item falls into L[0] */ int new_item_len; int version; - ret_val = - leaf_shift_left(tb, tb->lnum[0] - 1, - -1); + ret_val = leaf_shift_left(tb, tb->lnum[0] - 1, -1); /* Calculate item length to insert to S[0] */ - new_item_len = - ih_item_len(ih) - tb->lbytes; + new_item_len = ih_item_len(ih) - tb->lbytes; /* Calculate and check item length to insert to L[0] */ - put_ih_item_len(ih, - ih_item_len(ih) - - new_item_len); + put_ih_item_len(ih, ih_item_len(ih) - new_item_len); RFALSE(ih_item_len(ih) <= 0, "PAP-12080: there is nothing to insert into L[0]: ih_item_len=%d", @@ -349,30 +343,18 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h /* Insert new item into L[0] */ buffer_info_init_left(tb, &bi); leaf_insert_into_buf(&bi, - n + item_pos - - ret_val, ih, body, - zeros_num > - ih_item_len(ih) ? - ih_item_len(ih) : - zeros_num); + n + item_pos - ret_val, ih, body, + zeros_num > ih_item_len(ih) ? ih_item_len(ih) : zeros_num); version = ih_version(ih); /* Calculate key component, item length and body to insert into S[0] */ - set_le_ih_k_offset(ih, - le_ih_k_offset(ih) + - (tb-> - lbytes << - (is_indirect_le_ih - (ih) ? tb->tb_sb-> - s_blocksize_bits - - UNFM_P_SHIFT : - 0))); + set_le_ih_k_offset(ih, le_ih_k_offset(ih) + + (tb-> lbytes << (is_indirect_le_ih(ih) ? 
tb->tb_sb-> s_blocksize_bits - UNFM_P_SHIFT : 0))); put_ih_item_len(ih, new_item_len); if (tb->lbytes > zeros_num) { - body += - (tb->lbytes - zeros_num); + body += (tb->lbytes - zeros_num); zeros_num = 0; } else zeros_num -= tb->lbytes; @@ -383,15 +365,10 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h } else { /* new item in whole falls into L[0] */ /* Shift lnum[0]-1 items to L[0] */ - ret_val = - leaf_shift_left(tb, tb->lnum[0] - 1, - tb->lbytes); + ret_val = leaf_shift_left(tb, tb->lnum[0] - 1, tb->lbytes); /* Insert new item into L[0] */ buffer_info_init_left(tb, &bi); - leaf_insert_into_buf(&bi, - n + item_pos - - ret_val, ih, body, - zeros_num); + leaf_insert_into_buf(&bi, n + item_pos - ret_val, ih, body, zeros_num); tb->insert_size[0] = 0; zeros_num = 0; } @@ -399,264 +376,117 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h case M_PASTE: /* append item in L[0] */ - if (item_pos == tb->lnum[0] - 1 - && tb->lbytes != -1) { + if (item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) { /* we must shift the part of the appended item */ - if (is_direntry_le_ih - (B_N_PITEM_HEAD(tbS0, item_pos))) { + if (is_direntry_le_ih(B_N_PITEM_HEAD(tbS0, item_pos))) { RFALSE(zeros_num, "PAP-12090: invalid parameter in case of a directory"); /* directory item */ if (tb->lbytes > pos_in_item) { /* new directory entry falls into L[0] */ - struct item_head - *pasted; - int l_pos_in_item = - pos_in_item; + struct item_head *pasted; + int l_pos_in_item = pos_in_item; /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 entries from given directory item */ - ret_val = - leaf_shift_left(tb, - tb-> - lnum - [0], - tb-> - lbytes - - - 1); - if (ret_val - && !item_pos) { - pasted = - B_N_PITEM_HEAD - (tb->L[0], - B_NR_ITEMS - (tb-> - L[0]) - - 1); - l_pos_in_item += - I_ENTRY_COUNT - (pasted) - - (tb-> - lbytes - - 1); + ret_val = leaf_shift_left(tb, tb->lnum[0], tb->lbytes-1); + if (ret_val && !item_pos) { + pasted = B_N_PITEM_HEAD(tb->L[0], B_NR_ITEMS(tb->L[0]) - 1); + l_pos_in_item += I_ENTRY_COUNT(pasted) - (tb->lbytes -1); } /* Append given directory entry to directory item */ buffer_info_init_left(tb, &bi); - leaf_paste_in_buffer - (&bi, - n + item_pos - - ret_val, - l_pos_in_item, - tb->insert_size[0], - body, zeros_num); + leaf_paste_in_buffer(&bi, n + item_pos - ret_val, l_pos_in_item, tb->insert_size[0], body, zeros_num); /* previous string prepared space for pasting new entry, following string pastes this entry */ /* when we have merge directory item, pos_in_item has been changed too */ /* paste new directory entry. 1 is entry number */ - leaf_paste_entries(&bi, - n + - item_pos - - - ret_val, - l_pos_in_item, - 1, - (struct - reiserfs_de_head - *) - body, - body - + - DEH_SIZE, - tb-> - insert_size - [0] - ); + leaf_paste_entries(&bi, n + item_pos - ret_val, l_pos_in_item, + 1, (struct reiserfs_de_head *) body, + body + DEH_SIZE, tb->insert_size[0]); tb->insert_size[0] = 0; } else { /* new directory item doesn't fall into L[0] */ /* Shift lnum[0]-1 items in whole. Shift lbytes directory entries from directory item number lnum[0] */ - leaf_shift_left(tb, - tb-> - lnum[0], - tb-> - lbytes); + leaf_shift_left(tb, tb->lnum[0], tb->lbytes); } /* Calculate new position to append in item body */ pos_in_item -= tb->lbytes; } else { /* regular object */ - RFALSE(tb->lbytes <= 0, - "PAP-12095: there is nothing to shift to L[0]. 
lbytes=%d", - tb->lbytes); - RFALSE(pos_in_item != - ih_item_len - (B_N_PITEM_HEAD - (tbS0, item_pos)), + RFALSE(tb->lbytes <= 0, "PAP-12095: there is nothing to shift to L[0]. lbytes=%d", tb->lbytes); + RFALSE(pos_in_item != ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)), "PAP-12100: incorrect position to paste: item_len=%d, pos_in_item=%d", - ih_item_len - (B_N_PITEM_HEAD - (tbS0, item_pos)), - pos_in_item); + ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)),pos_in_item); if (tb->lbytes >= pos_in_item) { /* appended item will be in L[0] in whole */ int l_n; /* this bytes number must be appended to the last item of L[h] */ - l_n = - tb->lbytes - - pos_in_item; + l_n = tb->lbytes - pos_in_item; /* Calculate new insert_size[0] */ - tb->insert_size[0] -= - l_n; + tb->insert_size[0] -= l_n; - RFALSE(tb-> - insert_size[0] <= - 0, + RFALSE(tb->insert_size[0] <= 0, "PAP-12105: there is nothing to paste into L[0]. insert_size=%d", - tb-> - insert_size[0]); - ret_val = - leaf_shift_left(tb, - tb-> - lnum - [0], - ih_item_len - (B_N_PITEM_HEAD - (tbS0, - item_pos))); + tb->insert_size[0]); + ret_val = leaf_shift_left(tb, tb->lnum[0], ih_item_len + (B_N_PITEM_HEAD(tbS0, item_pos))); /* Append to body of item in L[0] */ buffer_info_init_left(tb, &bi); leaf_paste_in_buffer - (&bi, - n + item_pos - - ret_val, - ih_item_len - (B_N_PITEM_HEAD - (tb->L[0], - n + item_pos - - ret_val)), l_n, - body, - zeros_num > - l_n ? l_n : - zeros_num); + (&bi, n + item_pos - ret_val, ih_item_len + (B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val)), + l_n, body, + zeros_num > l_n ? l_n : zeros_num); /* 0-th item in S0 can be only of DIRECT type when l_n != 0 */ { int version; - int temp_l = - l_n; - - RFALSE - (ih_item_len - (B_N_PITEM_HEAD - (tbS0, - 0)), + int temp_l = l_n; + + RFALSE(ih_item_len(B_N_PITEM_HEAD(tbS0, 0)), "PAP-12106: item length must be 0"); - RFALSE - (comp_short_le_keys - (B_N_PKEY - (tbS0, 0), - B_N_PKEY - (tb->L[0], - n + - item_pos - - - ret_val)), + RFALSE(comp_short_le_keys(B_N_PKEY(tbS0, 0), B_N_PKEY + (tb->L[0], n + item_pos - ret_val)), "PAP-12107: items must be of the same file"); if (is_indirect_le_ih(B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val))) { - temp_l = - l_n - << - (tb-> - tb_sb-> - s_blocksize_bits - - - UNFM_P_SHIFT); + temp_l = l_n << (tb->tb_sb-> s_blocksize_bits - UNFM_P_SHIFT); } /* update key of first item in S0 */ - version = - ih_version - (B_N_PITEM_HEAD - (tbS0, 0)); - set_le_key_k_offset - (version, - B_N_PKEY - (tbS0, 0), - le_key_k_offset - (version, - B_N_PKEY - (tbS0, - 0)) + - temp_l); + version = ih_version(B_N_PITEM_HEAD(tbS0, 0)); + set_le_key_k_offset(version, B_N_PKEY(tbS0, 0), + le_key_k_offset(version,B_N_PKEY(tbS0, 0)) + temp_l); /* update left delimiting key */ - set_le_key_k_offset - (version, - B_N_PDELIM_KEY - (tb-> - CFL[0], - tb-> - lkey[0]), - le_key_k_offset - (version, - B_N_PDELIM_KEY - (tb-> - CFL[0], - tb-> - lkey[0])) - + temp_l); + set_le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0]), + le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0])) + temp_l); } /* Calculate new body, position in item and insert_size[0] */ if (l_n > zeros_num) { - body += - (l_n - - zeros_num); + body += (l_n - zeros_num); zeros_num = 0; } else - zeros_num -= - l_n; + zeros_num -= l_n; pos_in_item = 0; - RFALSE - (comp_short_le_keys - (B_N_PKEY(tbS0, 0), - B_N_PKEY(tb->L[0], - B_NR_ITEMS - (tb-> - L[0]) - - 1)) - || - !op_is_left_mergeable - (B_N_PKEY(tbS0, 0), - tbS0->b_size) - || - !op_is_left_mergeable - (B_N_PDELIM_KEY - (tb->CFL[0], - 
tb->lkey[0]), - tbS0->b_size), + RFALSE(comp_short_le_keys(B_N_PKEY(tbS0, 0), B_N_PKEY(tb->L[0], B_NR_ITEMS(tb->L[0]) - 1)) + || !op_is_left_mergeable(B_N_PKEY(tbS0, 0), tbS0->b_size) + || !op_is_left_mergeable(B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0]), tbS0->b_size), "PAP-12120: item must be merge-able with left neighboring item"); } else { /* only part of the appended item will be in L[0] */ /* Calculate position in item for append in S[0] */ - pos_in_item -= - tb->lbytes; + pos_in_item -= tb->lbytes; - RFALSE(pos_in_item <= 0, - "PAP-12125: no place for paste. pos_in_item=%d", - pos_in_item); + RFALSE(pos_in_item <= 0, "PAP-12125: no place for paste. pos_in_item=%d", pos_in_item); /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */ - leaf_shift_left(tb, - tb-> - lnum[0], - tb-> - lbytes); + leaf_shift_left(tb, tb->lnum[0], tb->lbytes); } } } else { /* appended item will be in L[0] in whole */ @@ -665,52 +495,30 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h if (!item_pos && op_is_left_mergeable(B_N_PKEY(tbS0, 0), tbS0->b_size)) { /* if we paste into first item of S[0] and it is left mergable */ /* then increment pos_in_item by the size of the last item in L[0] */ - pasted = - B_N_PITEM_HEAD(tb->L[0], - n - 1); + pasted = B_N_PITEM_HEAD(tb->L[0], n - 1); if (is_direntry_le_ih(pasted)) - pos_in_item += - ih_entry_count - (pasted); + pos_in_item += ih_entry_count(pasted); else - pos_in_item += - ih_item_len(pasted); + pos_in_item += ih_item_len(pasted); } /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */ - ret_val = - leaf_shift_left(tb, tb->lnum[0], - tb->lbytes); + ret_val = leaf_shift_left(tb, tb->lnum[0], tb->lbytes); /* Append to body of item in L[0] */ buffer_info_init_left(tb, &bi); - leaf_paste_in_buffer(&bi, - n + item_pos - - ret_val, + leaf_paste_in_buffer(&bi, n + item_pos - ret_val, pos_in_item, tb->insert_size[0], body, zeros_num); /* if appended item is directory, paste entry */ - pasted = - B_N_PITEM_HEAD(tb->L[0], - n + item_pos - - ret_val); + pasted = B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val); if (is_direntry_le_ih(pasted)) - leaf_paste_entries(&bi, - n + - item_pos - - ret_val, - pos_in_item, - 1, - (struct - reiserfs_de_head - *)body, - body + - DEH_SIZE, - tb-> - insert_size - [0] - ); + leaf_paste_entries(&bi, n + item_pos - ret_val, + pos_in_item, 1, + (struct reiserfs_de_head *) body, + body + DEH_SIZE, + tb->insert_size[0]); /* if appended item is indirect item, put unformatted node into un list */ if (is_indirect_le_ih(pasted)) set_ih_free_space(pasted, 0); @@ -722,13 +530,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h reiserfs_panic(tb->tb_sb, "PAP-12130", "lnum > 0: unexpected mode: " " %s(%d)", - (flag == - M_DELETE) ? "DELETE" : ((flag == - M_CUT) - ? "CUT" - : - "UNKNOWN"), - flag); + (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? 
"CUT" : "UNKNOWN"), flag); } } else { /* new item doesn't fall into L[0] */ @@ -748,14 +550,12 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h case M_INSERT: /* insert item */ if (n - tb->rnum[0] < item_pos) { /* new item or its part falls to R[0] */ if (item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1) { /* part of new item falls into R[0] */ - loff_t old_key_comp, old_len, - r_zeros_number; + loff_t old_key_comp, old_len, r_zeros_number; const char *r_body; int version; loff_t offset; - leaf_shift_right(tb, tb->rnum[0] - 1, - -1); + leaf_shift_right(tb, tb->rnum[0] - 1, -1); version = ih_version(ih); /* Remember key component and item length */ @@ -763,29 +563,17 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h old_len = ih_item_len(ih); /* Calculate key component and item length to insert into R[0] */ - offset = - le_ih_k_offset(ih) + - ((old_len - - tb-> - rbytes) << (is_indirect_le_ih(ih) - ? tb->tb_sb-> - s_blocksize_bits - - UNFM_P_SHIFT : 0)); + offset = le_ih_k_offset(ih) + ((old_len - tb->rbytes) << (is_indirect_le_ih(ih) ? tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT : 0)); set_le_ih_k_offset(ih, offset); put_ih_item_len(ih, tb->rbytes); /* Insert part of the item into R[0] */ buffer_info_init_right(tb, &bi); if ((old_len - tb->rbytes) > zeros_num) { r_zeros_number = 0; - r_body = - body + (old_len - - tb->rbytes) - - zeros_num; + r_body = body + (old_len - tb->rbytes) - zeros_num; } else { r_body = body; - r_zeros_number = - zeros_num - (old_len - - tb->rbytes); + r_zeros_number = zeros_num - (old_len - tb->rbytes); zeros_num -= r_zeros_number; } @@ -798,25 +586,18 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h /* Calculate key component and item length to insert into S[0] */ set_le_ih_k_offset(ih, old_key_comp); - put_ih_item_len(ih, - old_len - tb->rbytes); + put_ih_item_len(ih, old_len - tb->rbytes); tb->insert_size[0] -= tb->rbytes; } else { /* whole new item falls into R[0] */ /* Shift rnum[0]-1 items to R[0] */ - ret_val = - leaf_shift_right(tb, - tb->rnum[0] - 1, - tb->rbytes); + ret_val = leaf_shift_right(tb, tb->rnum[0] - 1, tb->rbytes); /* Insert new item into R[0] */ buffer_info_init_right(tb, &bi); - leaf_insert_into_buf(&bi, - item_pos - n + - tb->rnum[0] - 1, - ih, body, - zeros_num); + leaf_insert_into_buf(&bi, item_pos - n + tb->rnum[0] - 1, + ih, body, zeros_num); if (item_pos - n + tb->rnum[0] - 1 == 0) { replace_key(tb, tb->CFR[0], @@ -841,200 +622,97 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h RFALSE(zeros_num, "PAP-12145: invalid parameter in case of a directory"); - entry_count = - I_ENTRY_COUNT(B_N_PITEM_HEAD - (tbS0, - item_pos)); + entry_count = I_ENTRY_COUNT(B_N_PITEM_HEAD + (tbS0, item_pos)); if (entry_count - tb->rbytes < pos_in_item) /* new directory entry falls into R[0] */ { int paste_entry_position; - RFALSE(tb->rbytes - 1 >= - entry_count - || !tb-> - insert_size[0], + RFALSE(tb->rbytes - 1 >= entry_count || !tb-> insert_size[0], "PAP-12150: no enough of entries to shift to R[0]: rbytes=%d, entry_count=%d", - tb->rbytes, - entry_count); + tb->rbytes, entry_count); /* Shift rnum[0]-1 items in whole. 
Shift rbytes-1 directory entries from directory item number rnum[0] */ - leaf_shift_right(tb, - tb-> - rnum - [0], - tb-> - rbytes - - 1); + leaf_shift_right(tb, tb->rnum[0], tb->rbytes - 1); /* Paste given directory entry to directory item */ - paste_entry_position = - pos_in_item - - entry_count + - tb->rbytes - 1; + paste_entry_position = pos_in_item - entry_count + tb->rbytes - 1; buffer_info_init_right(tb, &bi); - leaf_paste_in_buffer - (&bi, 0, - paste_entry_position, - tb->insert_size[0], - body, zeros_num); + leaf_paste_in_buffer(&bi, 0, paste_entry_position, tb->insert_size[0], body, zeros_num); /* paste entry */ - leaf_paste_entries(&bi, - 0, - paste_entry_position, - 1, - (struct - reiserfs_de_head - *) - body, - body - + - DEH_SIZE, - tb-> - insert_size - [0] - ); - - if (paste_entry_position - == 0) { + leaf_paste_entries(&bi, 0, paste_entry_position, 1, + (struct reiserfs_de_head *) body, + body + DEH_SIZE, tb->insert_size[0]); + + if (paste_entry_position == 0) { /* change delimiting keys */ - replace_key(tb, - tb-> - CFR - [0], - tb-> - rkey - [0], - tb-> - R - [0], - 0); + replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0],0); } tb->insert_size[0] = 0; pos_in_item++; } else { /* new directory entry doesn't fall into R[0] */ - leaf_shift_right(tb, - tb-> - rnum - [0], - tb-> - rbytes); + leaf_shift_right(tb, tb->rnum[0], tb->rbytes); } } else { /* regular object */ - int n_shift, n_rem, - r_zeros_number; + int n_shift, n_rem, r_zeros_number; const char *r_body; /* Calculate number of bytes which must be shifted from appended item */ - if ((n_shift = - tb->rbytes - - tb->insert_size[0]) < 0) + if ((n_shift = tb->rbytes - tb->insert_size[0]) < 0) n_shift = 0; - RFALSE(pos_in_item != - ih_item_len - (B_N_PITEM_HEAD - (tbS0, item_pos)), + RFALSE(pos_in_item != ih_item_len + (B_N_PITEM_HEAD(tbS0, item_pos)), "PAP-12155: invalid position to paste. 
ih_item_len=%d, pos_in_item=%d", - pos_in_item, - ih_item_len - (B_N_PITEM_HEAD - (tbS0, item_pos))); - - leaf_shift_right(tb, - tb->rnum[0], - n_shift); + pos_in_item, ih_item_len + (B_N_PITEM_HEAD(tbS0, item_pos))); + + leaf_shift_right(tb, tb->rnum[0], n_shift); /* Calculate number of bytes which must remain in body after appending to R[0] */ - if ((n_rem = - tb->insert_size[0] - - tb->rbytes) < 0) + if ((n_rem = tb->insert_size[0] - tb->rbytes) < 0) n_rem = 0; { int version; - unsigned long temp_rem = - n_rem; - - version = - ih_version - (B_N_PITEM_HEAD - (tb->R[0], 0)); - if (is_indirect_le_key - (version, - B_N_PKEY(tb->R[0], - 0))) { - temp_rem = - n_rem << - (tb->tb_sb-> - s_blocksize_bits - - - UNFM_P_SHIFT); + unsigned long temp_rem = n_rem; + + version = ih_version(B_N_PITEM_HEAD(tb->R[0], 0)); + if (is_indirect_le_key(version, B_N_PKEY(tb->R[0], 0))) { + temp_rem = n_rem << (tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT); } - set_le_key_k_offset - (version, - B_N_PKEY(tb->R[0], - 0), - le_key_k_offset - (version, - B_N_PKEY(tb->R[0], - 0)) + - temp_rem); - set_le_key_k_offset - (version, - B_N_PDELIM_KEY(tb-> - CFR - [0], - tb-> - rkey - [0]), - le_key_k_offset - (version, - B_N_PDELIM_KEY - (tb->CFR[0], - tb->rkey[0])) + - temp_rem); + set_le_key_k_offset(version, B_N_PKEY(tb->R[0], 0), + le_key_k_offset(version, B_N_PKEY(tb->R[0], 0)) + temp_rem); + set_le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0]), + le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0])) + temp_rem); } /* k_offset (B_N_PKEY(tb->R[0],0)) += n_rem; k_offset (B_N_PDELIM_KEY(tb->CFR[0],tb->rkey[0])) += n_rem;*/ - do_balance_mark_internal_dirty - (tb, tb->CFR[0], 0); + do_balance_mark_internal_dirty(tb, tb->CFR[0], 0); /* Append part of body into R[0] */ buffer_info_init_right(tb, &bi); if (n_rem > zeros_num) { r_zeros_number = 0; - r_body = - body + n_rem - - zeros_num; + r_body = body + n_rem - zeros_num; } else { r_body = body; - r_zeros_number = - zeros_num - n_rem; - zeros_num -= - r_zeros_number; + r_zeros_number = zeros_num - n_rem; + zeros_num -= r_zeros_number; } - leaf_paste_in_buffer(&bi, 0, - n_shift, - tb-> - insert_size - [0] - - n_rem, - r_body, - r_zeros_number); - - if (is_indirect_le_ih - (B_N_PITEM_HEAD - (tb->R[0], 0))) { + leaf_paste_in_buffer(&bi, 0, n_shift, + tb->insert_size[0] - n_rem, + r_body, r_zeros_number); + + if (is_indirect_le_ih(B_N_PITEM_HEAD(tb->R[0], 0))) { #if 0 RFALSE(n_rem, "PAP-12160: paste more than one unformatted node pointer"); #endif - set_ih_free_space - (B_N_PITEM_HEAD - (tb->R[0], 0), 0); + set_ih_free_space(B_N_PITEM_HEAD(tb->R[0], 0), 0); } tb->insert_size[0] = n_rem; if (!n_rem) @@ -1044,58 +722,28 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h struct item_head *pasted; - ret_val = - leaf_shift_right(tb, tb->rnum[0], - tb->rbytes); + ret_val = leaf_shift_right(tb, tb->rnum[0], tb->rbytes); /* append item in R[0] */ if (pos_in_item >= 0) { buffer_info_init_right(tb, &bi); - leaf_paste_in_buffer(&bi, - item_pos - - n + - tb-> - rnum[0], - pos_in_item, - tb-> - insert_size - [0], body, - zeros_num); + leaf_paste_in_buffer(&bi, item_pos - n + tb->rnum[0], pos_in_item, + tb->insert_size[0], body, zeros_num); } /* paste new entry, if item is directory item */ - pasted = - B_N_PITEM_HEAD(tb->R[0], - item_pos - n + - tb->rnum[0]); - if (is_direntry_le_ih(pasted) - && pos_in_item >= 0) { - leaf_paste_entries(&bi, - item_pos - - n + - tb->rnum[0], - pos_in_item, - 1, - (struct - reiserfs_de_head - *)body, - 
body + - DEH_SIZE, - tb-> - insert_size - [0] - ); + pasted = B_N_PITEM_HEAD(tb->R[0], item_pos - n + tb->rnum[0]); + if (is_direntry_le_ih(pasted) && pos_in_item >= 0) { + leaf_paste_entries(&bi, item_pos - n + tb->rnum[0], + pos_in_item, 1, + (struct reiserfs_de_head *) body, + body + DEH_SIZE, tb->insert_size[0]); if (!pos_in_item) { - RFALSE(item_pos - n + - tb->rnum[0], + RFALSE(item_pos - n + tb->rnum[0], "PAP-12165: directory item must be first item of node when pasting is in 0th position"); /* update delimiting keys */ - replace_key(tb, - tb->CFR[0], - tb->rkey[0], - tb->R[0], - 0); + replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0); } } @@ -1111,22 +759,16 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h default: /* cases d and t */ reiserfs_panic(tb->tb_sb, "PAP-12175", "rnum > 0: unexpected mode: %s(%d)", - (flag == - M_DELETE) ? "DELETE" : ((flag == - M_CUT) ? "CUT" - : "UNKNOWN"), - flag); + (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag); } } /* tb->rnum[0] > 0 */ RFALSE(tb->blknum[0] > 3, - "PAP-12180: blknum can not be %d. It must be <= 3", - tb->blknum[0]); + "PAP-12180: blknum can not be %d. It must be <= 3", tb->blknum[0]); RFALSE(tb->blknum[0] < 0, - "PAP-12185: blknum can not be %d. It must be >= 0", - tb->blknum[0]); + "PAP-12185: blknum can not be %d. It must be >= 0", tb->blknum[0]); /* if while adding to a node we discover that it is possible to split it in two, and merge the left part into the left neighbor and the @@ -1177,8 +819,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h if (n - snum[i] < item_pos) { /* new item or it's part falls to first new node S_new[i] */ if (item_pos == n - snum[i] + 1 && sbytes[i] != -1) { /* part of new item falls into S_new[i] */ - int old_key_comp, old_len, - r_zeros_number; + int old_key_comp, old_len, r_zeros_number; const char *r_body; int version; @@ -1192,15 +833,8 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h old_len = ih_item_len(ih); /* Calculate key component and item length to insert into S_new[i] */ - set_le_ih_k_offset(ih, - le_ih_k_offset(ih) + - ((old_len - - sbytes[i]) << - (is_indirect_le_ih - (ih) ? tb->tb_sb-> - s_blocksize_bits - - UNFM_P_SHIFT : - 0))); + set_le_ih_k_offset(ih, le_ih_k_offset(ih) + + ((old_len - sbytes[i]) << (is_indirect_le_ih(ih) ? 
tb->tb_sb-> s_blocksize_bits - UNFM_P_SHIFT : 0))); put_ih_item_len(ih, sbytes[i]); @@ -1209,39 +843,29 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h if ((old_len - sbytes[i]) > zeros_num) { r_zeros_number = 0; - r_body = - body + (old_len - - sbytes[i]) - - zeros_num; + r_body = body + (old_len - sbytes[i]) - zeros_num; } else { r_body = body; - r_zeros_number = - zeros_num - (old_len - - sbytes[i]); + r_zeros_number = zeros_num - (old_len - sbytes[i]); zeros_num -= r_zeros_number; } - leaf_insert_into_buf(&bi, 0, ih, r_body, - r_zeros_number); + leaf_insert_into_buf(&bi, 0, ih, r_body, r_zeros_number); /* Calculate key component and item length to insert into S[i] */ set_le_ih_k_offset(ih, old_key_comp); - put_ih_item_len(ih, - old_len - sbytes[i]); + put_ih_item_len(ih, old_len - sbytes[i]); tb->insert_size[0] -= sbytes[i]; } else { /* whole new item falls into S_new[i] */ /* Shift snum[0] - 1 items to S_new[i] (sbytes[i] of split item) */ leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, - snum[i] - 1, sbytes[i], - S_new[i]); + snum[i] - 1, sbytes[i], S_new[i]); /* Insert new item into S_new[i] */ buffer_info_init_bh(tb, &bi, S_new[i]); - leaf_insert_into_buf(&bi, - item_pos - n + - snum[i] - 1, ih, - body, zeros_num); + leaf_insert_into_buf(&bi, item_pos - n + snum[i] - 1, + ih, body, zeros_num); zeros_num = tb->insert_size[0] = 0; } @@ -1268,150 +892,73 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h int entry_count; - entry_count = - ih_entry_count(aux_ih); + entry_count = ih_entry_count(aux_ih); - if (entry_count - sbytes[i] < - pos_in_item - && pos_in_item <= - entry_count) { + if (entry_count - sbytes[i] < pos_in_item && pos_in_item <= entry_count) { /* new directory entry falls into S_new[i] */ - RFALSE(!tb-> - insert_size[0], - "PAP-12215: insert_size is already 0"); - RFALSE(sbytes[i] - 1 >= - entry_count, + RFALSE(!tb->insert_size[0], "PAP-12215: insert_size is already 0"); + RFALSE(sbytes[i] - 1 >= entry_count, "PAP-12220: there are no so much entries (%d), only %d", - sbytes[i] - 1, - entry_count); + sbytes[i] - 1, entry_count); /* Shift snum[i]-1 items in whole. 
Shift sbytes[i] directory entries from directory item number snum[i] */ - leaf_move_items - (LEAF_FROM_S_TO_SNEW, - tb, snum[i], - sbytes[i] - 1, - S_new[i]); + leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, snum[i], sbytes[i] - 1, S_new[i]); /* Paste given directory entry to directory item */ buffer_info_init_bh(tb, &bi, S_new[i]); - leaf_paste_in_buffer - (&bi, 0, - pos_in_item - - entry_count + - sbytes[i] - 1, - tb->insert_size[0], - body, zeros_num); + leaf_paste_in_buffer(&bi, 0, pos_in_item - entry_count + sbytes[i] - 1, + tb->insert_size[0], body, zeros_num); /* paste new directory entry */ - leaf_paste_entries(&bi, - 0, - pos_in_item - - - entry_count - + - sbytes - [i] - - 1, 1, - (struct - reiserfs_de_head - *) - body, - body - + - DEH_SIZE, - tb-> - insert_size - [0] - ); + leaf_paste_entries(&bi, 0, pos_in_item - entry_count + sbytes[i] - 1, 1, + (struct reiserfs_de_head *) body, + body + DEH_SIZE, tb->insert_size[0]); tb->insert_size[0] = 0; pos_in_item++; } else { /* new directory entry doesn't fall into S_new[i] */ - leaf_move_items - (LEAF_FROM_S_TO_SNEW, - tb, snum[i], - sbytes[i], - S_new[i]); + leaf_move_items(LEAF_FROM_S_TO_SNEW,tb, snum[i], sbytes[i], S_new[i]); } } else { /* regular object */ - int n_shift, n_rem, - r_zeros_number; + int n_shift, n_rem, r_zeros_number; const char *r_body; - RFALSE(pos_in_item != - ih_item_len - (B_N_PITEM_HEAD - (tbS0, item_pos)) - || tb->insert_size[0] <= - 0, + RFALSE(pos_in_item != ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)) || tb->insert_size[0] <= 0, "PAP-12225: item too short or insert_size <= 0"); /* Calculate number of bytes which must be shifted from appended item */ - n_shift = - sbytes[i] - - tb->insert_size[0]; + n_shift = sbytes[i] - tb->insert_size[0]; if (n_shift < 0) n_shift = 0; - leaf_move_items - (LEAF_FROM_S_TO_SNEW, tb, - snum[i], n_shift, - S_new[i]); + leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, snum[i], n_shift, S_new[i]); /* Calculate number of bytes which must remain in body after append to S_new[i] */ - n_rem = - tb->insert_size[0] - - sbytes[i]; + n_rem = tb->insert_size[0] - sbytes[i]; if (n_rem < 0) n_rem = 0; /* Append part of body into S_new[0] */ buffer_info_init_bh(tb, &bi, S_new[i]); if (n_rem > zeros_num) { r_zeros_number = 0; - r_body = - body + n_rem - - zeros_num; + r_body = body + n_rem - zeros_num; } else { r_body = body; - r_zeros_number = - zeros_num - n_rem; - zeros_num -= - r_zeros_number; + r_zeros_number = zeros_num - n_rem; + zeros_num -= r_zeros_number; } - leaf_paste_in_buffer(&bi, 0, - n_shift, - tb-> - insert_size - [0] - - n_rem, - r_body, - r_zeros_number); + leaf_paste_in_buffer(&bi, 0, n_shift, + tb->insert_size[0] - n_rem, + r_body, r_zeros_number); { struct item_head *tmp; - tmp = - B_N_PITEM_HEAD(S_new - [i], - 0); + tmp = B_N_PITEM_HEAD(S_new[i], 0); if (is_indirect_le_ih (tmp)) { - set_ih_free_space - (tmp, 0); - set_le_ih_k_offset - (tmp, - le_ih_k_offset - (tmp) + - (n_rem << - (tb-> - tb_sb-> - s_blocksize_bits - - - UNFM_P_SHIFT))); + set_ih_free_space(tmp, 0); + set_le_ih_k_offset(tmp, le_ih_k_offset(tmp) + (n_rem << (tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT))); } else { - set_le_ih_k_offset - (tmp, - le_ih_k_offset - (tmp) + - n_rem); + set_le_ih_k_offset(tmp, le_ih_k_offset(tmp) + n_rem); } } @@ -1426,8 +973,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h struct item_head *pasted; #ifdef CONFIG_REISERFS_CHECK - struct item_head *ih_check = - B_N_PITEM_HEAD(tbS0, item_pos); + struct item_head *ih_check = B_N_PITEM_HEAD(tbS0, item_pos); 
if (!is_direntry_le_ih(ih_check) && (pos_in_item != ih_item_len(ih_check) @@ -1439,8 +985,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h "to ih_item_len"); #endif /* CONFIG_REISERFS_CHECK */ - leaf_mi = - leaf_move_items(LEAF_FROM_S_TO_SNEW, + leaf_mi = leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, snum[i], sbytes[i], S_new[i]); @@ -1452,30 +997,19 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h /* paste into item */ buffer_info_init_bh(tb, &bi, S_new[i]); leaf_paste_in_buffer(&bi, - item_pos - n + - snum[i], + item_pos - n + snum[i], pos_in_item, tb->insert_size[0], body, zeros_num); - pasted = - B_N_PITEM_HEAD(S_new[i], - item_pos - n + - snum[i]); + pasted = B_N_PITEM_HEAD(S_new[i], item_pos - n + snum[i]); if (is_direntry_le_ih(pasted)) { leaf_paste_entries(&bi, - item_pos - - n + snum[i], - pos_in_item, - 1, - (struct - reiserfs_de_head - *)body, - body + - DEH_SIZE, - tb-> - insert_size - [0] + item_pos - n + snum[i], + pos_in_item, 1, + (struct reiserfs_de_head *)body, + body + DEH_SIZE, + tb->insert_size[0] ); } @@ -1495,11 +1029,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h default: /* cases d and t */ reiserfs_panic(tb->tb_sb, "PAP-12245", "blknum > 2: unexpected mode: %s(%d)", - (flag == - M_DELETE) ? "DELETE" : ((flag == - M_CUT) ? "CUT" - : "UNKNOWN"), - flag); + (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag); } memcpy(insert_key + i, B_N_PKEY(S_new[i], 0), KEY_SIZE); @@ -1524,9 +1054,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h /* If we insert the first key change the delimiting key */ if (item_pos == 0) { if (tb->CFL[0]) /* can be 0 in reiserfsck */ - replace_key(tb, tb->CFL[0], tb->lkey[0], - tbS0, 0); - + replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0); } break; @@ -1536,53 +1064,27 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h pasted = B_N_PITEM_HEAD(tbS0, item_pos); /* when directory, may be new entry already pasted */ if (is_direntry_le_ih(pasted)) { - if (pos_in_item >= 0 && - pos_in_item <= - ih_entry_count(pasted)) { + if (pos_in_item >= 0 && pos_in_item <= ih_entry_count(pasted)) { RFALSE(!tb->insert_size[0], "PAP-12260: insert_size is 0 already"); /* prepare space */ buffer_info_init_tbS0(tb, &bi); - leaf_paste_in_buffer(&bi, - item_pos, - pos_in_item, - tb-> - insert_size - [0], body, + leaf_paste_in_buffer(&bi, item_pos, pos_in_item, + tb->insert_size[0], body, zeros_num); /* paste entry */ - leaf_paste_entries(&bi, - item_pos, - pos_in_item, - 1, - (struct - reiserfs_de_head - *)body, - body + - DEH_SIZE, - tb-> - insert_size - [0] - ); + leaf_paste_entries(&bi, item_pos, pos_in_item, 1, + (struct reiserfs_de_head *)body, + body + DEH_SIZE, + tb->insert_size[0]); if (!item_pos && !pos_in_item) { - RFALSE(!tb->CFL[0] - || !tb->L[0], + RFALSE(!tb->CFL[0] || !tb->L[0], "PAP-12270: CFL[0]/L[0] must be specified"); - if (tb->CFL[0]) { - replace_key(tb, - tb-> - CFL - [0], - tb-> - lkey - [0], - tbS0, - 0); - - } + if (tb->CFL[0]) + replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0); } tb->insert_size[0] = 0; } @@ -1593,13 +1095,8 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h "PAP-12275: insert size must not be %d", tb->insert_size[0]); buffer_info_init_tbS0(tb, &bi); - leaf_paste_in_buffer(&bi, - item_pos, - pos_in_item, - tb-> - insert_size - [0], body, - zeros_num); + leaf_paste_in_buffer(&bi, item_pos, 
pos_in_item, + tb->insert_size[0], body, zeros_num); if (is_indirect_le_ih(pasted)) { #if 0 @@ -1611,8 +1108,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h tb-> insert_size[0]); #endif - set_ih_free_space - (pasted, 0); + set_ih_free_space(pasted, 0); } tb->insert_size[0] = 0; } @@ -1620,8 +1116,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h else { if (tb->insert_size[0]) { print_cur_tb("12285"); - reiserfs_panic(tb-> - tb_sb, + reiserfs_panic(tb->tb_sb, "PAP-12285", "insert_size " "must be 0 " diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 8e4f41d9af4d..34c7bdc06014 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -701,6 +701,18 @@ static inline pte_t pte_mknuma(pte_t pte) } #endif +#ifndef ptep_set_numa +static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + pte_t ptent = *ptep; + + ptent = pte_mknuma(ptent); + set_pte_at(mm, addr, ptep, ptent); + return; +} +#endif + #ifndef pmd_mknuma static inline pmd_t pmd_mknuma(pmd_t pmd) { @@ -708,6 +720,18 @@ static inline pmd_t pmd_mknuma(pmd_t pmd) return pmd_clear_flags(pmd, _PAGE_PRESENT); } #endif + +#ifndef pmdp_set_numa +static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp) +{ + pmd_t pmd = *pmdp; + + pmd = pmd_mknuma(pmd); + set_pmd_at(mm, addr, pmdp, pmd); + return; +} +#endif #else extern int pte_numa(pte_t pte); extern int pmd_numa(pmd_t pmd); @@ -715,6 +739,8 @@ extern pte_t pte_mknonnuma(pte_t pte); extern pmd_t pmd_mknonnuma(pmd_t pmd); extern pte_t pte_mknuma(pte_t pte); extern pmd_t pmd_mknuma(pmd_t pmd); +extern void ptep_set_numa(struct mm_struct *mm, unsigned long addr, pte_t *ptep); +extern void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp); #endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */ #else static inline int pmd_numa(pmd_t pmd) @@ -742,10 +768,23 @@ static inline pte_t pte_mknuma(pte_t pte) return pte; } +static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + return; +} + + static inline pmd_t pmd_mknuma(pmd_t pmd) { return pmd; } + +static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp) +{ + return ; +} #endif /* CONFIG_NUMA_BALANCING */ #endif /* CONFIG_MMU */ diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 04086c5be930..04a7f31301f8 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -199,6 +199,9 @@ int drm_err(const char *func, const char *format, ...); #define DRM_INFO(fmt, ...) \ printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__) +#define DRM_INFO_ONCE(fmt, ...) \ + printk_once(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__) + /** * Debug output. * diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 71727b6210ae..8f3dee097579 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -907,6 +907,9 @@ struct drm_mode_config { /* whether async page flip is supported or not */ bool async_page_flip; + + /* cursor size */ + uint32_t cursor_width, cursor_height; }; #define obj_to_crtc(x) container_of(x, struct drm_crtc, base) diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h index d1f61bfe0ebe..49a828425fa2 100644 --- a/include/drm/ttm/ttm_page_alloc.h +++ b/include/drm/ttm/ttm_page_alloc.h @@ -29,6 +29,8 @@ #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_memory.h> +struct device; + /** * Initialize pool allocator. 
*/ diff --git a/include/linux/bio.h b/include/linux/bio.h index 70654521dab6..5a4d39b4686b 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -250,6 +250,17 @@ static inline unsigned bio_segments(struct bio *bio) struct bio_vec bv; struct bvec_iter iter; + /* + * We special case discard/write same, because they interpret bi_size + * differently: + */ + + if (bio->bi_rw & REQ_DISCARD) + return 1; + + if (bio->bi_rw & REQ_WRITE_SAME) + return 1; + bio_for_each_segment(bv, bio, iter) segs++; @@ -332,6 +343,7 @@ extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *); extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs); extern struct bio_set *fs_bio_set; +unsigned int bio_integrity_tag_size(struct bio *bio); static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs) { diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 161b23105b1e..18ba8a627f46 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -83,6 +83,8 @@ struct blk_mq_ops { */ rq_timed_out_fn *timeout; + softirq_done_fn *complete; + /* * Override for hctx allocations (should probably go) */ @@ -119,11 +121,12 @@ void blk_mq_init_commands(struct request_queue *, void (*init)(void *data, struc void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); -void blk_mq_insert_request(struct request_queue *, struct request *, bool); +void blk_mq_insert_request(struct request_queue *, struct request *, + bool, bool); void blk_mq_run_queues(struct request_queue *q, bool async); void blk_mq_free_request(struct request *rq); bool blk_mq_can_queue(struct blk_mq_hw_ctx *); -struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved); +struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp); struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp); struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag); @@ -133,6 +136,8 @@ void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int); void blk_mq_end_io(struct request *rq, int error); +void blk_mq_complete_request(struct request *rq); + void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_stop_hw_queues(struct request_queue *q); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 8678c4322b44..4afa4f8f6090 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -98,7 +98,7 @@ struct request { struct list_head queuelist; union { struct call_single_data csd; - struct work_struct mq_flush_data; + struct work_struct mq_flush_work; }; struct request_queue *q; @@ -448,13 +448,8 @@ struct request_queue { unsigned long flush_pending_since; struct list_head flush_queue[2]; struct list_head flush_data_in_flight; - union { - struct request flush_rq; - struct { - spinlock_t mq_flush_lock; - struct work_struct mq_flush_work; - }; - }; + struct request *flush_rq; + spinlock_t mq_flush_lock; struct mutex sysfs_lock; diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index 2623cffc73a1..25bfb0eff772 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -373,8 +373,9 @@ extern const char *ceph_mds_op_name(int op); /* * Ceph setxattr request flags. 
*/ -#define CEPH_XATTR_CREATE 1 -#define CEPH_XATTR_REPLACE 2 +#define CEPH_XATTR_CREATE (1 << 0) +#define CEPH_XATTR_REPLACE (1 << 1) +#define CEPH_XATTR_REMOVE (1 << 31) union ceph_mds_request_args { struct { diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h index ded429966c1f..2507fd2a1eb4 100644 --- a/include/linux/compiler-gcc4.h +++ b/include/linux/compiler-gcc4.h @@ -75,11 +75,7 @@ * * (asm goto is automatically volatile - the naming reflects this.) */ -#if GCC_VERSION <= 40801 -# define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0) -#else -# define asm_volatile_goto(x...) do { asm goto(x); } while (0) -#endif +#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0) #ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP #if GCC_VERSION >= 40400 diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index dfac5ed31120..f886985a28b2 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -171,7 +171,7 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops, size_t size, int flags, const char *); #define dma_buf_export(priv, ops, size, flags) \ - dma_buf_export_named(priv, ops, size, flags, __FILE__) + dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME) int dma_buf_fd(struct dma_buf *dmabuf, int flags); struct dma_buf *dma_buf_get(int fd); diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 4d34dbbbad4d..7a8144fef406 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -4,8 +4,6 @@ #include <linux/err.h> #include <linux/kernel.h> -#ifdef CONFIG_GPIOLIB - struct device; struct gpio_chip; @@ -18,6 +16,8 @@ struct gpio_chip; */ struct gpio_desc; +#ifdef CONFIG_GPIOLIB + /* Acquire and dispose GPIOs */ struct gpio_desc *__must_check gpiod_get(struct device *dev, const char *con_id); diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 15da677478dd..344883dce584 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -875,7 +875,7 @@ struct vmbus_channel_relid_released { struct vmbus_channel_initiate_contact { struct vmbus_channel_message_header header; u32 vmbus_version_requested; - u32 padding2; + u32 target_vcpu; /* The VCPU the host should respond to */ u64 interrupt_page; u64 monitor_page1; u64 monitor_page2; diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 0053adde0ed9..a2678d35b5a2 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -158,6 +158,11 @@ devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler, devname, dev_id); } +extern int __must_check +devm_request_any_context_irq(struct device *dev, unsigned int irq, + irq_handler_t handler, unsigned long irqflags, + const char *devname, void *dev_id); + extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); /* diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 554548cd3dd4..130bc8d77fa5 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -38,8 +38,10 @@ #include <linux/pci.h> #include <linux/spinlock_types.h> #include <linux/semaphore.h> +#include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/radix-tree.h> + #include <linux/mlx5/device.h> #include <linux/mlx5/doorbell.h> @@ -227,6 +229,7 @@ struct mlx5_uuar_info { * protect uuar allocation data structs */ struct mutex lock; + u32 ver; }; struct mlx5_bf { diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 891432a994c0..5e4756553c18 
100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -752,6 +752,9 @@ struct netdev_phys_port_id { unsigned char id_len; }; +typedef u16 (*select_queue_fallback_t)(struct net_device *dev, + struct sk_buff *skb); + /* * This structure defines the management hooks for network devices. * The following hooks can be defined; unless noted otherwise, they are @@ -783,7 +786,7 @@ struct netdev_phys_port_id { * Required can not be NULL. * * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, - * void *accel_priv); + * void *accel_priv, select_queue_fallback_t fallback); * Called to decide which queue to when device supports multiple * transmit queues. * @@ -1005,7 +1008,8 @@ struct net_device_ops { struct net_device *dev); u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, - void *accel_priv); + void *accel_priv, + select_queue_fallback_t fallback); void (*ndo_change_rx_flags)(struct net_device *dev, int flags); void (*ndo_set_rx_mode)(struct net_device *dev); @@ -1545,7 +1549,6 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev, struct netdev_queue *netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, void *accel_priv); -u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); /* * Net namespace inlines @@ -2284,6 +2287,26 @@ static inline void netdev_reset_queue(struct net_device *dev_queue) } /** + * netdev_cap_txqueue - check if selected tx queue exceeds device queues + * @dev: network device + * @queue_index: given tx queue index + * + * Returns 0 if given tx queue index >= number of device tx queues, + * otherwise returns the originally passed tx queue index. + */ +static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index) +{ + if (unlikely(queue_index >= dev->real_num_tx_queues)) { + net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n", + dev->name, queue_index, + dev->real_num_tx_queues); + return 0; + } + + return queue_index; +} + +/** * netif_running - test if up * @dev: network device * @@ -3076,7 +3099,12 @@ void netdev_change_features(struct net_device *dev); void netif_stacked_transfer_operstate(const struct net_device *rootdev, struct net_device *dev); -netdev_features_t netif_skb_features(struct sk_buff *skb); +netdev_features_t netif_skb_dev_features(struct sk_buff *skb, + const struct net_device *dev); +static inline netdev_features_t netif_skb_features(struct sk_buff *skb) +{ + return netif_skb_dev_features(skb, skb->dev); +} static inline bool net_gso_ok(netdev_features_t features, int gso_type) { diff --git a/include/linux/of.h b/include/linux/of.h index 70c64ba17fa5..435cb995904d 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -169,35 +169,15 @@ static inline const char *of_node_full_name(const struct device_node *np) extern struct device_node *of_find_node_by_name(struct device_node *from, const char *name); -#define for_each_node_by_name(dn, name) \ - for (dn = of_find_node_by_name(NULL, name); dn; \ - dn = of_find_node_by_name(dn, name)) extern struct device_node *of_find_node_by_type(struct device_node *from, const char *type); -#define for_each_node_by_type(dn, type) \ - for (dn = of_find_node_by_type(NULL, type); dn; \ - dn = of_find_node_by_type(dn, type)) extern struct device_node *of_find_compatible_node(struct device_node *from, const char *type, const char *compat); -#define for_each_compatible_node(dn, type, compatible) \ - for (dn = of_find_compatible_node(NULL, type, compatible); dn; \ - dn = 
of_find_compatible_node(dn, type, compatible)) extern struct device_node *of_find_matching_node_and_match( struct device_node *from, const struct of_device_id *matches, const struct of_device_id **match); -static inline struct device_node *of_find_matching_node( - struct device_node *from, - const struct of_device_id *matches) -{ - return of_find_matching_node_and_match(from, matches, NULL); -} -#define for_each_matching_node(dn, matches) \ - for (dn = of_find_matching_node(NULL, matches); dn; \ - dn = of_find_matching_node(dn, matches)) -#define for_each_matching_node_and_match(dn, matches, match) \ - for (dn = of_find_matching_node_and_match(NULL, matches, match); \ - dn; dn = of_find_matching_node_and_match(dn, matches, match)) + extern struct device_node *of_find_node_by_path(const char *path); extern struct device_node *of_find_node_by_phandle(phandle handle); extern struct device_node *of_get_parent(const struct device_node *node); @@ -209,43 +189,11 @@ extern struct device_node *of_get_next_available_child( extern struct device_node *of_get_child_by_name(const struct device_node *node, const char *name); -#define for_each_child_of_node(parent, child) \ - for (child = of_get_next_child(parent, NULL); child != NULL; \ - child = of_get_next_child(parent, child)) - -#define for_each_available_child_of_node(parent, child) \ - for (child = of_get_next_available_child(parent, NULL); child != NULL; \ - child = of_get_next_available_child(parent, child)) - -static inline int of_get_child_count(const struct device_node *np) -{ - struct device_node *child; - int num = 0; - - for_each_child_of_node(np, child) - num++; - - return num; -} - -static inline int of_get_available_child_count(const struct device_node *np) -{ - struct device_node *child; - int num = 0; - - for_each_available_child_of_node(np, child) - num++; - - return num; -} /* cache lookup */ extern struct device_node *of_find_next_cache_node(const struct device_node *); extern struct device_node *of_find_node_with_property( struct device_node *from, const char *prop_name); -#define for_each_node_with_property(dn, prop_name) \ - for (dn = of_find_node_with_property(NULL, prop_name); dn; \ - dn = of_find_node_with_property(dn, prop_name)) extern struct property *of_find_property(const struct device_node *np, const char *name, @@ -367,42 +315,53 @@ static inline struct device_node *of_find_node_by_name(struct device_node *from, return NULL; } -static inline struct device_node *of_get_parent(const struct device_node *node) +static inline struct device_node *of_find_node_by_type(struct device_node *from, + const char *type) { return NULL; } -static inline bool of_have_populated_dt(void) +static inline struct device_node *of_find_matching_node_and_match( + struct device_node *from, + const struct of_device_id *matches, + const struct of_device_id **match) { - return false; + return NULL; } -/* Kill an unused variable warning on a device_node pointer */ -static inline void __of_use_dn(const struct device_node *np) +static inline struct device_node *of_get_parent(const struct device_node *node) { + return NULL; } -#define for_each_child_of_node(parent, child) \ - while (__of_use_dn(parent), __of_use_dn(child), 0) +static inline struct device_node *of_get_next_child( + const struct device_node *node, struct device_node *prev) +{ + return NULL; +} -#define for_each_available_child_of_node(parent, child) \ - while (0) +static inline struct device_node *of_get_next_available_child( + const struct device_node *node, struct device_node *prev) +{ 
+ return NULL; +} -static inline struct device_node *of_get_child_by_name( - const struct device_node *node, - const char *name) +static inline struct device_node *of_find_node_with_property( + struct device_node *from, const char *prop_name) { return NULL; } -static inline int of_get_child_count(const struct device_node *np) +static inline bool of_have_populated_dt(void) { - return 0; + return false; } -static inline int of_get_available_child_count(const struct device_node *np) +static inline struct device_node *of_get_child_by_name( + const struct device_node *node, + const char *name) { - return 0; + return NULL; } static inline int of_device_is_compatible(const struct device_node *device, @@ -569,6 +528,13 @@ extern int of_node_to_nid(struct device_node *np); static inline int of_node_to_nid(struct device_node *device) { return 0; } #endif +static inline struct device_node *of_find_matching_node( + struct device_node *from, + const struct of_device_id *matches) +{ + return of_find_matching_node_and_match(from, matches, NULL); +} + /** * of_property_read_bool - Findfrom a property * @np: device node from which the property value is to be read. @@ -618,6 +584,55 @@ static inline int of_property_read_u32(const struct device_node *np, s; \ s = of_prop_next_string(prop, s)) +#define for_each_node_by_name(dn, name) \ + for (dn = of_find_node_by_name(NULL, name); dn; \ + dn = of_find_node_by_name(dn, name)) +#define for_each_node_by_type(dn, type) \ + for (dn = of_find_node_by_type(NULL, type); dn; \ + dn = of_find_node_by_type(dn, type)) +#define for_each_compatible_node(dn, type, compatible) \ + for (dn = of_find_compatible_node(NULL, type, compatible); dn; \ + dn = of_find_compatible_node(dn, type, compatible)) +#define for_each_matching_node(dn, matches) \ + for (dn = of_find_matching_node(NULL, matches); dn; \ + dn = of_find_matching_node(dn, matches)) +#define for_each_matching_node_and_match(dn, matches, match) \ + for (dn = of_find_matching_node_and_match(NULL, matches, match); \ + dn; dn = of_find_matching_node_and_match(dn, matches, match)) + +#define for_each_child_of_node(parent, child) \ + for (child = of_get_next_child(parent, NULL); child != NULL; \ + child = of_get_next_child(parent, child)) +#define for_each_available_child_of_node(parent, child) \ + for (child = of_get_next_available_child(parent, NULL); child != NULL; \ + child = of_get_next_available_child(parent, child)) + +#define for_each_node_with_property(dn, prop_name) \ + for (dn = of_find_node_with_property(NULL, prop_name); dn; \ + dn = of_find_node_with_property(dn, prop_name)) + +static inline int of_get_child_count(const struct device_node *np) +{ + struct device_node *child; + int num = 0; + + for_each_child_of_node(np, child) + num++; + + return num; +} + +static inline int of_get_available_child_count(const struct device_node *np) +{ + struct device_node *child; + int num = 0; + + for_each_available_child_of_node(np, child) + num++; + + return num; +} + #if defined(CONFIG_PROC_FS) && defined(CONFIG_PROC_DEVICETREE) extern void proc_device_tree_add_node(struct device_node *, struct proc_dir_entry *); extern void proc_device_tree_add_prop(struct proc_dir_entry *pde, struct property *prop); diff --git a/include/linux/of_device.h b/include/linux/of_device.h index 8d7dd6768cb7..ef370210ffb2 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h @@ -78,11 +78,13 @@ static inline int of_device_uevent_modalias(struct device *dev, static inline void of_device_node_put(struct device *dev) { } -static 
inline const struct of_device_id *of_match_device( +static inline const struct of_device_id *__of_match_device( const struct of_device_id *matches, const struct device *dev) { return NULL; } +#define of_match_device(matches, dev) \ + __of_match_device(of_match_ptr(matches), (dev)) static inline struct device_node *of_cpu_device_node_get(int cpu) { diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h index e273e5ac19c9..3f83459dbb20 100644 --- a/include/linux/phy/phy.h +++ b/include/linux/phy/phy.h @@ -146,7 +146,9 @@ static inline void phy_set_bus_width(struct phy *phy, int bus_width) phy->attrs.bus_width = bus_width; } struct phy *phy_get(struct device *dev, const char *string); +struct phy *phy_optional_get(struct device *dev, const char *string); struct phy *devm_phy_get(struct device *dev, const char *string); +struct phy *devm_phy_optional_get(struct device *dev, const char *string); void phy_put(struct phy *phy); void devm_phy_put(struct device *dev, struct phy *phy); struct phy *of_phy_simple_xlate(struct device *dev, @@ -232,11 +234,23 @@ static inline struct phy *phy_get(struct device *dev, const char *string) return ERR_PTR(-ENOSYS); } +static inline struct phy *phy_optional_get(struct device *dev, + const char *string) +{ + return ERR_PTR(-ENOSYS); +} + static inline struct phy *devm_phy_get(struct device *dev, const char *string) { return ERR_PTR(-ENOSYS); } +static inline struct phy *devm_phy_optional_get(struct device *dev, + const char *string) +{ + return ERR_PTR(-ENOSYS); +} + static inline void phy_put(struct phy *phy) { } diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index f589c9af8cbf..3ebbbe7b6d05 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2916,5 +2916,22 @@ static inline bool skb_head_is_locked(const struct sk_buff *skb) { return !skb->head_frag || skb_cloned(skb); } + +/** + * skb_gso_network_seglen - Return length of individual segments of a gso packet + * + * @skb: GSO skb + * + * skb_gso_network_seglen is used to determine the real size of the + * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP). + * + * The MAC/L2 header is not accounted for. + */ +static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb) +{ + unsigned int hdr_len = skb_transport_header(skb) - + skb_network_header(skb); + return hdr_len + skb_gso_transport_seglen(skb); +} #endif /* __KERNEL__ */ #endif /* _LINUX_SKBUFF_H */ diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index a1d4ca290862..4203c66d8803 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -273,7 +273,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * message while queuing transfers that arrive in the meantime. When the * driver is finished with this message, it must call * spi_finalize_current_message() so the subsystem can issue the next - * transfer + * message * @unprepare_transfer_hardware: there are currently no more messages on the * queue so the subsystem notifies the driver that it may relax the * hardware by issuing this call @@ -287,7 +287,10 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * - return 1 if the transfer is still in progress. When * the driver is finished with this transfer it must * call spi_finalize_current_transfer() so the subsystem - * can issue the next transfer + * can issue the next transfer. 
Note: transfer_one and + * transfer_one_message are mutually exclusive; when both + * are set, the generic subsystem does not call your + * transfer_one callback. * @unprepare_message: undo any work done by prepare_message(). * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS * number. Any individual value may be -ENOENT for CS lines that diff --git a/include/linux/usb.h b/include/linux/usb.h index c716da18c668..7f6eb859873e 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -1265,8 +1265,6 @@ typedef void (*usb_complete_t)(struct urb *); * @sg: scatter gather buffer list, the buffer size of each element in * the list (except the last) must be divisible by the endpoint's * max packet size if no_sg_constraint isn't set in 'struct usb_bus' - * (FIXME: scatter-gather under xHCI is broken for periodic transfers. - * Do not use urb->sg for interrupt endpoints for now, only bulk.) * @num_mapped_sgs: (internal) number of mapped sg entries * @num_sgs: number of entries in the sg list * @transfer_buffer_length: How big is transfer_buffer. The transfer may diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index d992ca3145fe..6ee76c804893 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1653,17 +1653,6 @@ struct sctp_association { /* This is the last advertised value of rwnd over a SACK chunk. */ __u32 a_rwnd; - /* Number of bytes by which the rwnd has slopped. The rwnd is allowed - * to slop over a maximum of the association's frag_point. - */ - __u32 rwnd_over; - - /* Keeps treack of rwnd pressure. This happens when we have - * a window, but not recevie buffer (i.e small packets). This one - * is releases slowly (1 PMTU at a time ). - */ - __u32 rwnd_press; - /* This is the sndbuf size in use for the association. * This corresponds to the sndbuf size for the association, * as specified in the sk->sndbuf. 
@@ -1892,8 +1881,7 @@ void sctp_assoc_update(struct sctp_association *old, __u32 sctp_association_get_next_tsn(struct sctp_association *); void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *); -void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int); -void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int); +void sctp_assoc_rwnd_update(struct sctp_association *, bool); void sctp_assoc_set_primary(struct sctp_association *, struct sctp_transport *); void sctp_assoc_del_nonprimary_peers(struct sctp_association *, diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 8d4a1c06f7e4..6793f32ccb58 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -226,7 +226,8 @@ enum ib_port_cap_flags { IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22, IB_PORT_BOOT_MGMT_SUP = 1 << 23, IB_PORT_LINK_LATENCY_SUP = 1 << 24, - IB_PORT_CLIENT_REG_SUP = 1 << 25 + IB_PORT_CLIENT_REG_SUP = 1 << 25, + IB_PORT_IP_BASED_GIDS = 1 << 26 }; enum ib_port_width { diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index c9c791209cd1..1772fadcff62 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -525,7 +525,6 @@ struct se_cmd { #define CMD_T_COMPLETE (1 << 2) #define CMD_T_SENT (1 << 4) #define CMD_T_STOP (1 << 5) -#define CMD_T_FAILED (1 << 6) #define CMD_T_DEV_ACTIVE (1 << 7) #define CMD_T_REQUEST_STOP (1 << 8) #define CMD_T_BUSY (1 << 9) diff --git a/include/trace/events/power.h b/include/trace/events/power.h index 9e9475c85de5..e5bf9a76f169 100644 --- a/include/trace/events/power.h +++ b/include/trace/events/power.h @@ -42,7 +42,6 @@ TRACE_EVENT(pstate_sample, u32 state, u64 mperf, u64 aperf, - u32 energy, u32 freq ), @@ -51,7 +50,6 @@ TRACE_EVENT(pstate_sample, state, mperf, aperf, - energy, freq ), @@ -61,7 +59,6 @@ TRACE_EVENT(pstate_sample, __field(u32, state) __field(u64, mperf) __field(u64, aperf) - __field(u32, energy) __field(u32, freq) ), @@ -72,17 +69,15 @@ TRACE_EVENT(pstate_sample, __entry->state = state; __entry->mperf = mperf; __entry->aperf = aperf; - __entry->energy = energy; __entry->freq = freq; ), - TP_printk("core_busy=%lu scaled=%lu state=%lu mperf=%llu aperf=%llu energy=%lu freq=%lu ", + TP_printk("core_busy=%lu scaled=%lu state=%lu mperf=%llu aperf=%llu freq=%lu ", (unsigned long)__entry->core_busy, (unsigned long)__entry->scaled_busy, (unsigned long)__entry->state, (unsigned long long)__entry->mperf, (unsigned long long)__entry->aperf, - (unsigned long)__entry->energy, (unsigned long)__entry->freq ) diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 3c9a833992e8..b06c8ed68707 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -619,6 +619,8 @@ struct drm_gem_open { #define DRM_PRIME_CAP_EXPORT 0x2 #define DRM_CAP_TIMESTAMP_MONOTONIC 0x6 #define DRM_CAP_ASYNC_PAGE_FLIP 0x7 +#define DRM_CAP_CURSOR_WIDTH 0x8 +#define DRM_CAP_CURSOR_HEIGHT 0x9 /** DRM_IOCTL_GET_CAP ioctl argument type */ struct drm_get_cap { diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h index 9971c560ed9a..87792a5fee3b 100644 --- a/include/uapi/drm/vmwgfx_drm.h +++ b/include/uapi/drm/vmwgfx_drm.h @@ -87,6 +87,7 @@ #define DRM_VMW_PARAM_MAX_SURF_MEMORY 7 #define DRM_VMW_PARAM_3D_CAPS_SIZE 8 #define DRM_VMW_PARAM_MAX_MOB_MEMORY 9 +#define DRM_VMW_PARAM_MAX_MOB_SIZE 10 /** * struct drm_vmw_getparam_arg diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index 1b8a0f4c9590..b4d69092fbdb 100644 --- a/include/uapi/linux/btrfs.h 
+++ b/include/uapi/linux/btrfs.h @@ -558,7 +558,6 @@ static inline char *btrfs_err_str(enum btrfs_err_code err_code) #define BTRFS_IOC_DEFAULT_SUBVOL _IOW(BTRFS_IOCTL_MAGIC, 19, __u64) #define BTRFS_IOC_SPACE_INFO _IOWR(BTRFS_IOCTL_MAGIC, 20, \ struct btrfs_ioctl_space_args) -#define BTRFS_IOC_GLOBAL_RSV _IOR(BTRFS_IOCTL_MAGIC, 20, __u64) #define BTRFS_IOC_START_SYNC _IOR(BTRFS_IOCTL_MAGIC, 24, __u64) #define BTRFS_IOC_WAIT_SYNC _IOW(BTRFS_IOCTL_MAGIC, 22, __u64) #define BTRFS_IOC_SNAP_CREATE_V2 _IOW(BTRFS_IOCTL_MAGIC, 23, \ diff --git a/include/uapi/linux/mic_ioctl.h b/include/uapi/linux/mic_ioctl.h index 7fabba5059cf..feb0b4c0814c 100644 --- a/include/uapi/linux/mic_ioctl.h +++ b/include/uapi/linux/mic_ioctl.h @@ -39,7 +39,7 @@ struct mic_copy_desc { #else struct iovec *iov; #endif - int iovcnt; + __u32 iovcnt; __u8 vr_idx; __u8 update_used; __u32 out_len; diff --git a/include/uapi/xen/Kbuild b/include/uapi/xen/Kbuild index 61257cb14653..5c459628e8c7 100644 --- a/include/uapi/xen/Kbuild +++ b/include/uapi/xen/Kbuild @@ -1,3 +1,5 @@ # UAPI Header export list header-y += evtchn.h +header-y += gntalloc.h +header-y += gntdev.h header-y += privcmd.h diff --git a/include/xen/gntalloc.h b/include/uapi/xen/gntalloc.h index 76bd58065f4f..76bd58065f4f 100644 --- a/include/xen/gntalloc.h +++ b/include/uapi/xen/gntalloc.h diff --git a/include/xen/gntdev.h b/include/uapi/xen/gntdev.h index 5304bd3c84c5..5304bd3c84c5 100644 --- a/include/xen/gntdev.h +++ b/include/uapi/xen/gntdev.h diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h index ae665ac59c36..32ec05a6572f 100644 --- a/include/xen/interface/io/blkif.h +++ b/include/xen/interface/io/blkif.h @@ -113,13 +113,13 @@ typedef uint64_t blkif_sector_t; * it's less than the number provided by the backend. The indirect_grefs field * in blkif_request_indirect should be filled by the frontend with the * grant references of the pages that are holding the indirect segments. - * This pages are filled with an array of blkif_request_segment_aligned - * that hold the information about the segments. The number of indirect - * pages to use is determined by the maximum number of segments - * a indirect request contains. Every indirect page can contain a maximum - * of 512 segments (PAGE_SIZE/sizeof(blkif_request_segment_aligned)), - * so to calculate the number of indirect pages to use we have to do - * ceil(indirect_segments/512). + * These pages are filled with an array of blkif_request_segment that hold the + * information about the segments. The number of indirect pages to use is + * determined by the number of segments an indirect request contains. Every + * indirect page can contain a maximum of + * (PAGE_SIZE / sizeof(struct blkif_request_segment)) segments, so to + * calculate the number of indirect pages to use we have to do + * ceil(indirect_segments / (PAGE_SIZE / sizeof(struct blkif_request_segment))). * * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not* * create the "feature-max-indirect-segments" node! @@ -135,13 +135,12 @@ typedef uint64_t blkif_sector_t; #define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8 -struct blkif_request_segment_aligned { - grant_ref_t gref; /* reference to I/O buffer frame */ - /* @first_sect: first sector in frame to transfer (inclusive). */ - /* @last_sect: last sector in frame to transfer (inclusive). 
*/ - uint8_t first_sect, last_sect; - uint16_t _pad; /* padding to make it 8 bytes, so it's cache-aligned */ -} __attribute__((__packed__)); +struct blkif_request_segment { + grant_ref_t gref; /* reference to I/O buffer frame */ + /* @first_sect: first sector in frame to transfer (inclusive). */ + /* @last_sect: last sector in frame to transfer (inclusive). */ + uint8_t first_sect, last_sect; +}; struct blkif_request_rw { uint8_t nr_segments; /* number of segments */ @@ -151,12 +150,7 @@ struct blkif_request_rw { #endif uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ - struct blkif_request_segment { - grant_ref_t gref; /* reference to I/O buffer frame */ - /* @first_sect: first sector in frame to transfer (inclusive). */ - /* @last_sect: last sector in frame to transfer (inclusive). */ - uint8_t first_sect, last_sect; - } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; + struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; } __attribute__((__packed__)); struct blkif_request_discard { diff --git a/include/xen/interface/xencomm.h b/include/xen/interface/xencomm.h deleted file mode 100644 index ac45e0712afa..000000000000 --- a/include/xen/interface/xencomm.h +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * - * Copyright (C) IBM Corp. 2006 - */ - -#ifndef _XEN_XENCOMM_H_ -#define _XEN_XENCOMM_H_ - -/* A xencomm descriptor is a scatter/gather list containing physical - * addresses corresponding to a virtually contiguous memory area. The - * hypervisor translates these physical addresses to machine addresses to copy - * to and from the virtually contiguous area. - */ - -#define XENCOMM_MAGIC 0x58434F4D /* 'XCOM' */ -#define XENCOMM_INVALID (~0UL) - -struct xencomm_desc { - uint32_t magic; - uint32_t nr_addrs; /* the number of entries in address[] */ - uint64_t address[0]; -}; - -#endif /* _XEN_XENCOMM_H_ */ diff --git a/include/xen/xencomm.h b/include/xen/xencomm.h deleted file mode 100644 index e43b039be112..000000000000 --- a/include/xen/xencomm.h +++ /dev/null @@ -1,77 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - * Copyright (C) IBM Corp. 2006 - * - * Authors: Hollis Blanchard <hollisb@us.ibm.com> - * Jerone Young <jyoung5@us.ibm.com> - */ - -#ifndef _LINUX_XENCOMM_H_ -#define _LINUX_XENCOMM_H_ - -#include <xen/interface/xencomm.h> - -#define XENCOMM_MINI_ADDRS 3 -struct xencomm_mini { - struct xencomm_desc _desc; - uint64_t address[XENCOMM_MINI_ADDRS]; -}; - -/* To avoid additionnal virt to phys conversion, an opaque structure is - presented. */ -struct xencomm_handle; - -extern void xencomm_free(struct xencomm_handle *desc); -extern struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes); -extern struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, - unsigned long bytes, struct xencomm_mini *xc_area); - -#if 0 -#define XENCOMM_MINI_ALIGNED(xc_desc, n) \ - struct xencomm_mini xc_desc ## _base[(n)] \ - __attribute__((__aligned__(sizeof(struct xencomm_mini)))); \ - struct xencomm_mini *xc_desc = &xc_desc ## _base[0]; -#else -/* - * gcc bug workaround: - * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=16660 - * gcc doesn't handle properly stack variable with - * __attribute__((__align__(sizeof(struct xencomm_mini)))) - */ -#define XENCOMM_MINI_ALIGNED(xc_desc, n) \ - unsigned char xc_desc ## _base[((n) + 1 ) * \ - sizeof(struct xencomm_mini)]; \ - struct xencomm_mini *xc_desc = (struct xencomm_mini *) \ - ((unsigned long)xc_desc ## _base + \ - (sizeof(struct xencomm_mini) - \ - ((unsigned long)xc_desc ## _base) % \ - sizeof(struct xencomm_mini))); -#endif -#define xencomm_map_no_alloc(ptr, bytes) \ - ({ XENCOMM_MINI_ALIGNED(xc_desc, 1); \ - __xencomm_map_no_alloc(ptr, bytes, xc_desc); }) - -/* provided by architecture code: */ -extern unsigned long xencomm_vtop(unsigned long vaddr); - -static inline void *xencomm_pa(void *ptr) -{ - return (void *)xencomm_vtop((unsigned long)ptr); -} - -#define xen_guest_handle(hnd) ((hnd).p) - -#endif /* _LINUX_XENCOMM_H_ */ diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c index bd8e788d71e0..1ef0606797c9 100644 --- a/kernel/irq/devres.c +++ b/kernel/irq/devres.c @@ -73,6 +73,51 @@ int devm_request_threaded_irq(struct device *dev, unsigned int irq, EXPORT_SYMBOL(devm_request_threaded_irq); /** + * devm_request_any_context_irq - allocate an interrupt line for a managed device + * @dev: device to request interrupt for + * @irq: Interrupt line to allocate + * @handler: Function to be called when the IRQ occurs + * @thread_fn: function to be called in a threaded interrupt context. NULL + * for devices which handle everything in @handler + * @irqflags: Interrupt type flags + * @devname: An ascii name for the claiming device + * @dev_id: A cookie passed back to the handler function + * + * Except for the extra @dev argument, this function takes the + * same arguments and performs the same function as + * request_any_context_irq(). IRQs requested with this function will be + * automatically freed on driver detach. + * + * If an IRQ allocated with this function needs to be freed + * separately, devm_free_irq() must be used. 
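As with other devm_* helpers, the kernel-doc above implies a typical probe-time usage. A minimal sketch of a hypothetical platform driver follows; the foo_* names are illustrative only and not part of this patch.

    #include <linux/interrupt.h>
    #include <linux/platform_device.h>

    /* Hypothetical handler; a real driver would acknowledge its hardware here. */
    static irqreturn_t foo_irq_handler(int irq, void *dev_id)
    {
            return IRQ_HANDLED;
    }

    static int foo_probe(struct platform_device *pdev)
    {
            int irq, ret;

            irq = platform_get_irq(pdev, 0);
            if (irq < 0)
                    return irq;

            /* Works whether the parent irqchip needs a threaded handler or not;
             * the IRQ is released automatically when the driver detaches. */
            ret = devm_request_any_context_irq(&pdev->dev, irq, foo_irq_handler,
                                               IRQF_TRIGGER_LOW,
                                               dev_name(&pdev->dev), pdev);
            if (ret < 0)
                    return ret;

            return 0;
    }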
+ */ +int devm_request_any_context_irq(struct device *dev, unsigned int irq, + irq_handler_t handler, unsigned long irqflags, + const char *devname, void *dev_id) +{ + struct irq_devres *dr; + int rc; + + dr = devres_alloc(devm_irq_release, sizeof(struct irq_devres), + GFP_KERNEL); + if (!dr) + return -ENOMEM; + + rc = request_any_context_irq(irq, handler, irqflags, devname, dev_id); + if (rc) { + devres_free(dr); + return rc; + } + + dr->irq = irq; + dr->dev_id = dev_id; + devres_add(dev, dr); + + return 0; +} +EXPORT_SYMBOL(devm_request_any_context_irq); + +/** * devm_free_irq - free an interrupt * @dev: device to free interrupt for * @irq: Interrupt line to free diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 192a302d6cfd..8ab8e9390297 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -274,6 +274,7 @@ struct irq_desc *irq_to_desc(unsigned int irq) { return (irq < NR_IRQS) ? irq_desc + irq : NULL; } +EXPORT_SYMBOL(irq_to_desc); static void free_desc(unsigned int irq) { diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index b1d255f04135..4dae9cbe9259 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -1076,7 +1076,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear) next_seq = log_next_seq; len = 0; - prev = 0; while (len >= 0 && seq < next_seq) { struct printk_log *msg = log_from_idx(idx); int textlen; @@ -2788,7 +2787,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog, next_idx = idx; l = 0; - prev = 0; while (seq < dumper->next_seq) { struct printk_log *msg = log_from_idx(idx); diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c index 7a925ba456fb..a6a5bf53e86d 100644 --- a/kernel/time/jiffies.c +++ b/kernel/time/jiffies.c @@ -51,7 +51,13 @@ * HZ shrinks, so values greater than 8 overflow 32bits when * HZ=100. */ +#if HZ < 34 +#define JIFFIES_SHIFT 6 +#elif HZ < 67 +#define JIFFIES_SHIFT 7 +#else #define JIFFIES_SHIFT 8 +#endif static cycle_t jiffies_read(struct clocksource *cs) { diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 43780ab5e279..98977a57ac72 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -756,6 +756,7 @@ out: static void tick_broadcast_clear_oneshot(int cpu) { cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask); + cpumask_clear_cpu(cpu, tick_broadcast_pending_mask); } static void tick_broadcast_init_next_event(struct cpumask *mask, diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 294b8a271a04..fc4da2d97f9b 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -2397,6 +2397,13 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, write &= RB_WRITE_MASK; tail = write - length; + /* + * If this is the first commit on the page, then it has the same + * timestamp as the page itself. + */ + if (!tail) + delta = 0; + /* See if we shot pass the end of this buffer page */ if (unlikely(write > BUF_PAGE_SIZE)) return rb_move_tail(cpu_buffer, length, tail, diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c index 7be235f1a70b..93d145e5539c 100644 --- a/lib/percpu_ida.c +++ b/lib/percpu_ida.c @@ -54,9 +54,7 @@ static inline void move_tags(unsigned *dst, unsigned *dst_nr, /* * Try to steal tags from a remote cpu's percpu freelist. * - * We first check how many percpu freelists have tags - we don't steal tags - * unless enough percpu freelists have tags on them that it's possible more than - * half the total tags could be stuck on remote percpu freelists. 
+ * We first check how many percpu freelists have tags * * Then we iterate through the cpus until we find some tags - we don't attempt * to find the "best" cpu to steal from, to keep cacheline bouncing to a @@ -69,8 +67,7 @@ static inline void steal_tags(struct percpu_ida *pool, struct percpu_ida_cpu *remote; for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags); - cpus_have_tags * pool->percpu_max_size > pool->nr_tags / 2; - cpus_have_tags--) { + cpus_have_tags; cpus_have_tags--) { cpu = cpumask_next(cpu, &pool->cpus_have_tags); if (cpu >= nr_cpu_ids) { diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 82166bf974e1..da23eb96779f 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1545,6 +1545,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, entry = pmd_mknonnuma(entry); entry = pmd_modify(entry, newprot); ret = HPAGE_PMD_NR; + set_pmd_at(mm, addr, pmd, entry); BUG_ON(pmd_write(entry)); } else { struct page *page = pmd_page(*pmd); @@ -1557,16 +1558,10 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, */ if (!is_huge_zero_page(page) && !pmd_numa(*pmd)) { - entry = *pmd; - entry = pmd_mknuma(entry); + pmdp_set_numa(mm, addr, pmd); ret = HPAGE_PMD_NR; } } - - /* Set PMD if cleared earlier */ - if (ret == HPAGE_PMD_NR) - set_pmd_at(mm, addr, pmd, entry); - spin_unlock(ptl); } diff --git a/mm/mprotect.c b/mm/mprotect.c index 7332c1785744..769a67a15803 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -58,36 +58,27 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, if (pte_numa(ptent)) ptent = pte_mknonnuma(ptent); ptent = pte_modify(ptent, newprot); + /* + * Avoid taking write faults for pages we + * know to be dirty. + */ + if (dirty_accountable && pte_dirty(ptent)) + ptent = pte_mkwrite(ptent); + ptep_modify_prot_commit(mm, addr, pte, ptent); updated = true; } else { struct page *page; - ptent = *pte; page = vm_normal_page(vma, addr, oldpte); if (page && !PageKsm(page)) { if (!pte_numa(oldpte)) { - ptent = pte_mknuma(ptent); - set_pte_at(mm, addr, pte, ptent); + ptep_set_numa(mm, addr, pte); updated = true; } } } - - /* - * Avoid taking write faults for pages we know to be - * dirty. 
- */ - if (dirty_accountable && pte_dirty(ptent)) { - ptent = pte_mkwrite(ptent); - updated = true; - } - if (updated) pages++; - - /* Only !prot_numa always clears the pte */ - if (!prot_numa) - ptep_modify_prot_commit(mm, addr, pte, ptent); } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) { swp_entry_t entry = pte_to_swp_entry(oldpte); diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 512159bf607f..8323bced8e5b 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -241,19 +241,19 @@ batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const uint8_t *addr) size = bat_priv->num_ifaces * sizeof(uint8_t); orig_node->bat_iv.bcast_own_sum = kzalloc(size, GFP_ATOMIC); if (!orig_node->bat_iv.bcast_own_sum) - goto free_bcast_own; + goto free_orig_node; hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig, batadv_choose_orig, orig_node, &orig_node->hash_entry); if (hash_added != 0) - goto free_bcast_own; + goto free_orig_node; return orig_node; -free_bcast_own: - kfree(orig_node->bat_iv.bcast_own); free_orig_node: + /* free twice, as batadv_orig_node_new sets refcount to 2 */ + batadv_orig_node_free_ref(orig_node); batadv_orig_node_free_ref(orig_node); return NULL; @@ -266,7 +266,7 @@ batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface, struct batadv_orig_node *orig_neigh) { struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); - struct batadv_neigh_node *neigh_node; + struct batadv_neigh_node *neigh_node, *tmp_neigh_node; neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, orig_node); if (!neigh_node) @@ -281,14 +281,24 @@ batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface, neigh_node->orig_node = orig_neigh; neigh_node->if_incoming = hard_iface; - batadv_dbg(BATADV_DBG_BATMAN, bat_priv, - "Creating new neighbor %pM for orig_node %pM on interface %s\n", - neigh_addr, orig_node->orig, hard_iface->net_dev->name); - spin_lock_bh(&orig_node->neigh_list_lock); - hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list); + tmp_neigh_node = batadv_neigh_node_get(orig_node, hard_iface, + neigh_addr); + if (!tmp_neigh_node) { + hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list); + } else { + kfree(neigh_node); + batadv_hardif_free_ref(hard_iface); + neigh_node = tmp_neigh_node; + } spin_unlock_bh(&orig_node->neigh_list_lock); + if (!tmp_neigh_node) + batadv_dbg(BATADV_DBG_BATMAN, bat_priv, + "Creating new neighbor %pM for orig_node %pM on interface %s\n", + neigh_addr, orig_node->orig, + hard_iface->net_dev->name); + out: return neigh_node; } diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 3d417d3641c6..b851cc580853 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -241,7 +241,7 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface) { struct batadv_priv *bat_priv = netdev_priv(soft_iface); const struct batadv_hard_iface *hard_iface; - int min_mtu = ETH_DATA_LEN; + int min_mtu = INT_MAX; rcu_read_lock(); list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { @@ -256,8 +256,6 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface) } rcu_read_unlock(); - atomic_set(&bat_priv->packet_size_max, min_mtu); - if (atomic_read(&bat_priv->fragmentation) == 0) goto out; @@ -268,13 +266,21 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface) min_mtu = min_t(int, min_mtu, BATADV_FRAG_MAX_FRAG_SIZE); min_mtu -= sizeof(struct batadv_frag_packet); min_mtu *= BATADV_FRAG_MAX_FRAGMENTS; - 
atomic_set(&bat_priv->packet_size_max, min_mtu); - - /* with fragmentation enabled we can fragment external packets easily */ - min_mtu = min_t(int, min_mtu, ETH_DATA_LEN); out: - return min_mtu - batadv_max_header_len(); + /* report to the other components the maximum amount of bytes that + * batman-adv can send over the wire (without considering the payload + * overhead). For example, this value is used by TT to compute the + * maximum local table table size + */ + atomic_set(&bat_priv->packet_size_max, min_mtu); + + /* the real soft-interface MTU is computed by removing the payload + * overhead from the maximum amount of bytes that was just computed. + * + * However batman-adv does not support MTUs bigger than ETH_DATA_LEN + */ + return min_t(int, min_mtu - batadv_max_header_len(), ETH_DATA_LEN); } /* adjusts the MTU if a new interface with a smaller MTU appeared. */ diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 6df12a2e3605..853941629dc1 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -458,6 +458,42 @@ out: } /** + * batadv_neigh_node_get - retrieve a neighbour from the list + * @orig_node: originator which the neighbour belongs to + * @hard_iface: the interface where this neighbour is connected to + * @addr: the address of the neighbour + * + * Looks for and possibly returns a neighbour belonging to this originator list + * which is connected through the provided hard interface. + * Returns NULL if the neighbour is not found. + */ +struct batadv_neigh_node * +batadv_neigh_node_get(const struct batadv_orig_node *orig_node, + const struct batadv_hard_iface *hard_iface, + const uint8_t *addr) +{ + struct batadv_neigh_node *tmp_neigh_node, *res = NULL; + + rcu_read_lock(); + hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) { + if (!batadv_compare_eth(tmp_neigh_node->addr, addr)) + continue; + + if (tmp_neigh_node->if_incoming != hard_iface) + continue; + + if (!atomic_inc_not_zero(&tmp_neigh_node->refcount)) + continue; + + res = tmp_neigh_node; + break; + } + rcu_read_unlock(); + + return res; +} + +/** * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object * @rcu: rcu pointer of the orig_ifinfo object */ diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h index 37be290f63f6..db3a9ed734cb 100644 --- a/net/batman-adv/originator.h +++ b/net/batman-adv/originator.h @@ -29,6 +29,10 @@ void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node); struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv, const uint8_t *addr); struct batadv_neigh_node * +batadv_neigh_node_get(const struct batadv_orig_node *orig_node, + const struct batadv_hard_iface *hard_iface, + const uint8_t *addr); +struct batadv_neigh_node * batadv_neigh_node_new(struct batadv_hard_iface *hard_iface, const uint8_t *neigh_addr, struct batadv_orig_node *orig_node); diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 1ed9f7c9ecea..a953d5b196a3 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -688,7 +688,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, int is_old_ttvn; /* check if there is enough data before accessing it */ - if (pskb_may_pull(skb, hdr_len + ETH_HLEN) < 0) + if (!pskb_may_pull(skb, hdr_len + ETH_HLEN)) return 0; /* create a copy of the skb (in case of for re-routing) to modify it. 
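A note on the batadv_check_unicast_ttvn fix above: pskb_may_pull() returns a boolean-style int (nonzero when the requested bytes can be made linear, 0 on failure) and never a negative value, so the old "< 0" test could not reject short packets. A minimal sketch of the intended pattern:

    /* Sketch only: drop (return 0) when the packet is too short to hold
     * the batman-adv header plus an Ethernet header. */
    if (!pskb_may_pull(skb, hdr_len + ETH_HLEN))
            return 0;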
*/ @@ -918,6 +918,8 @@ int batadv_recv_unicast_tvlv(struct sk_buff *skb, if (ret != NET_RX_SUCCESS) ret = batadv_route_unicast_packet(skb, recv_if); + else + consume_skb(skb); return ret; } diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index 579f5f00a385..843febd1e519 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c @@ -254,9 +254,9 @@ static int batadv_send_skb_unicast(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node, unsigned short vid) { - struct ethhdr *ethhdr = (struct ethhdr *)skb->data; + struct ethhdr *ethhdr; struct batadv_unicast_packet *unicast_packet; - int ret = NET_XMIT_DROP; + int ret = NET_XMIT_DROP, hdr_size; if (!orig_node) goto out; @@ -265,12 +265,16 @@ static int batadv_send_skb_unicast(struct batadv_priv *bat_priv, case BATADV_UNICAST: if (!batadv_send_skb_prepare_unicast(skb, orig_node)) goto out; + + hdr_size = sizeof(*unicast_packet); break; case BATADV_UNICAST_4ADDR: if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb, orig_node, packet_subtype)) goto out; + + hdr_size = sizeof(struct batadv_unicast_4addr_packet); break; default: /* this function supports UNICAST and UNICAST_4ADDR only. It @@ -279,6 +283,7 @@ static int batadv_send_skb_unicast(struct batadv_priv *bat_priv, goto out; } + ethhdr = (struct ethhdr *)(skb->data + hdr_size); unicast_packet = (struct batadv_unicast_packet *)skb->data; /* inform the destination node that we are still missing a correct route diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index b6071f675a3e..959dde721c46 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -1975,6 +1975,7 @@ static uint32_t batadv_tt_global_crc(struct batadv_priv *bat_priv, struct hlist_head *head; uint32_t i, crc_tmp, crc = 0; uint8_t flags; + __be16 tmp_vid; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; @@ -2011,8 +2012,11 @@ static uint32_t batadv_tt_global_crc(struct batadv_priv *bat_priv, orig_node)) continue; - crc_tmp = crc32c(0, &tt_common->vid, - sizeof(tt_common->vid)); + /* use network order to read the VID: this ensures that + * every node reads the bytes in the same order. + */ + tmp_vid = htons(tt_common->vid); + crc_tmp = crc32c(0, &tmp_vid, sizeof(tmp_vid)); /* compute the CRC on flags that have to be kept in sync * among nodes @@ -2046,6 +2050,7 @@ static uint32_t batadv_tt_local_crc(struct batadv_priv *bat_priv, struct hlist_head *head; uint32_t i, crc_tmp, crc = 0; uint8_t flags; + __be16 tmp_vid; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; @@ -2064,8 +2069,11 @@ static uint32_t batadv_tt_local_crc(struct batadv_priv *bat_priv, if (tt_common->flags & BATADV_TT_CLIENT_NEW) continue; - crc_tmp = crc32c(0, &tt_common->vid, - sizeof(tt_common->vid)); + /* use network order to read the VID: this ensures that + * every node reads the bytes in the same order. 
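The byte-order issue addressed here can be illustrated with a small sketch (hypothetical VID value, not from the patch): a little-endian node previously fed the bytes 05 00 of VID 0x0005 to crc32c() while a big-endian node fed 00 05, so the two computed different CRCs for the same translation-table entry. Converting to network order first makes every node hash identical bytes:

    #include <linux/crc32c.h>
    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* Sketch: hash a VLAN ID in a host-endianness-independent way,
     * e.g. VID 0x0005 is hashed as 00 05 on both LE and BE hosts. */
    static u32 vid_crc32c(u32 crc, unsigned short vid)
    {
            __be16 tmp_vid = htons(vid);

            return crc32c(crc, &tmp_vid, sizeof(tmp_vid));
    }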
+ */ + tmp_vid = htons(tt_common->vid); + crc_tmp = crc32c(0, &tmp_vid, sizeof(tmp_vid)); /* compute the CRC on flags that have to be kept in sync * among nodes @@ -2262,6 +2270,7 @@ static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node, { struct batadv_tvlv_tt_vlan_data *tt_vlan_tmp; struct batadv_orig_node_vlan *vlan; + uint32_t crc; int i; /* check if each received CRC matches the locally stored one */ @@ -2281,7 +2290,10 @@ static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node, if (!vlan) return false; - if (vlan->tt.crc != ntohl(tt_vlan_tmp->crc)) + crc = vlan->tt.crc; + batadv_orig_node_vlan_free_ref(vlan); + + if (crc != ntohl(tt_vlan_tmp->crc)) return false; } @@ -3218,7 +3230,6 @@ static void batadv_tt_update_orig(struct batadv_priv *bat_priv, spin_lock_bh(&orig_node->tt_lock); - tt_change = (struct batadv_tvlv_tt_change *)tt_buff; batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn, tt_change); diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 292e619db896..d9fb93451442 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -430,6 +430,16 @@ static void hidp_del_timer(struct hidp_session *session) del_timer(&session->timer); } +static void hidp_process_report(struct hidp_session *session, + int type, const u8 *data, int len, int intr) +{ + if (len > HID_MAX_BUFFER_SIZE) + len = HID_MAX_BUFFER_SIZE; + + memcpy(session->input_buf, data, len); + hid_input_report(session->hid, type, session->input_buf, len, intr); +} + static void hidp_process_handshake(struct hidp_session *session, unsigned char param) { @@ -502,7 +512,8 @@ static int hidp_process_data(struct hidp_session *session, struct sk_buff *skb, hidp_input_report(session, skb); if (session->hid) - hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 0); + hidp_process_report(session, HID_INPUT_REPORT, + skb->data, skb->len, 0); break; case HIDP_DATA_RTYPE_OTHER: @@ -584,7 +595,8 @@ static void hidp_recv_intr_frame(struct hidp_session *session, hidp_input_report(session, skb); if (session->hid) { - hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 1); + hidp_process_report(session, HID_INPUT_REPORT, + skb->data, skb->len, 1); BT_DBG("report len %d", skb->len); } } else { diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h index ab5241400cf7..8798492a6e99 100644 --- a/net/bluetooth/hidp/hidp.h +++ b/net/bluetooth/hidp/hidp.h @@ -24,6 +24,7 @@ #define __HIDP_H #include <linux/types.h> +#include <linux/hid.h> #include <linux/kref.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/l2cap.h> @@ -179,6 +180,9 @@ struct hidp_session { /* Used in hidp_output_raw_report() */ int output_report_success; /* boolean */ + + /* temporary input buffer */ + u8 input_buf[HID_MAX_BUFFER_SIZE]; }; /* HIDP init defines */ diff --git a/net/core/dev.c b/net/core/dev.c index 4ad1b78c9c77..b1b0c8d4d7df 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2420,7 +2420,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault); * 2. No high memory really exists on this machine. 
*/ -static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) +static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb) { #ifdef CONFIG_HIGHMEM int i; @@ -2495,34 +2495,36 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features) } static netdev_features_t harmonize_features(struct sk_buff *skb, - netdev_features_t features) + const struct net_device *dev, + netdev_features_t features) { if (skb->ip_summed != CHECKSUM_NONE && !can_checksum_protocol(features, skb_network_protocol(skb))) { features &= ~NETIF_F_ALL_CSUM; - } else if (illegal_highdma(skb->dev, skb)) { + } else if (illegal_highdma(dev, skb)) { features &= ~NETIF_F_SG; } return features; } -netdev_features_t netif_skb_features(struct sk_buff *skb) +netdev_features_t netif_skb_dev_features(struct sk_buff *skb, + const struct net_device *dev) { __be16 protocol = skb->protocol; - netdev_features_t features = skb->dev->features; + netdev_features_t features = dev->features; - if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs) + if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs) features &= ~NETIF_F_GSO_MASK; if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) { struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; protocol = veh->h_vlan_encapsulated_proto; } else if (!vlan_tx_tag_present(skb)) { - return harmonize_features(skb, features); + return harmonize_features(skb, dev, features); } - features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX | + features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX); if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) @@ -2530,9 +2532,9 @@ netdev_features_t netif_skb_features(struct sk_buff *skb) NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; - return harmonize_features(skb, features); + return harmonize_features(skb, dev, features); } -EXPORT_SYMBOL(netif_skb_features); +EXPORT_SYMBOL(netif_skb_dev_features); int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq) diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 87577d447554..e29e810663d7 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -323,17 +323,6 @@ u32 __skb_get_poff(const struct sk_buff *skb) return poff; } -static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index) -{ - if (unlikely(queue_index >= dev->real_num_tx_queues)) { - net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n", - dev->name, queue_index, - dev->real_num_tx_queues); - return 0; - } - return queue_index; -} - static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) { #ifdef CONFIG_XPS @@ -372,7 +361,7 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) #endif } -u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) +static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) { struct sock *sk = skb->sk; int queue_index = sk_tx_queue_get(sk); @@ -392,7 +381,6 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) return queue_index; } -EXPORT_SYMBOL(__netdev_pick_tx); struct netdev_queue *netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, @@ -403,13 +391,13 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev, if (dev->real_num_tx_queues != 1) { const struct net_device_ops *ops = dev->netdev_ops; if (ops->ndo_select_queue) - queue_index = ops->ndo_select_queue(dev, skb, - 
accel_priv); + queue_index = ops->ndo_select_queue(dev, skb, accel_priv, + __netdev_pick_tx); else queue_index = __netdev_pick_tx(dev, skb); if (!accel_priv) - queue_index = dev_cap_txqueue(dev, queue_index); + queue_index = netdev_cap_txqueue(dev, queue_index); } skb_set_queue_mapping(skb, queue_index); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 7b2ad564b303..fc122fdb266a 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1968,16 +1968,21 @@ replay: dev->ifindex = ifm->ifi_index; - if (ops->newlink) + if (ops->newlink) { err = ops->newlink(net, dev, tb, data); - else + /* Drivers should call free_netdev() in ->destructor + * and unregister it on failure so that device could be + * finally freed in rtnl_unlock. + */ + if (err < 0) + goto out; + } else { err = register_netdevice(dev); - - if (err < 0) { - free_netdev(dev); - goto out; + if (err < 0) { + free_netdev(dev); + goto out; + } } - err = rtnl_configure_link(dev, ifm); if (err < 0) unregister_netdevice(dev); diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c index c073b81a1f3e..62b5828acde0 100644 --- a/net/dccp/ccids/lib/tfrc.c +++ b/net/dccp/ccids/lib/tfrc.c @@ -8,7 +8,7 @@ #include "tfrc.h" #ifdef CONFIG_IP_DCCP_TFRC_DEBUG -static bool tfrc_debug; +bool tfrc_debug; module_param(tfrc_debug, bool, 0644); MODULE_PARM_DESC(tfrc_debug, "Enable TFRC debug messages"); #endif diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h index a3d8f7c76ae0..40ee7d62b652 100644 --- a/net/dccp/ccids/lib/tfrc.h +++ b/net/dccp/ccids/lib/tfrc.h @@ -21,6 +21,7 @@ #include "packet_history.h" #ifdef CONFIG_IP_DCCP_TFRC_DEBUG +extern bool tfrc_debug; #define tfrc_pr_debug(format, a...) DCCP_PR_DEBUG(tfrc_debug, format, ##a) #else #define tfrc_pr_debug(format, a...) diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index d9d929042a89..be8abe73bb9f 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c @@ -39,6 +39,71 @@ #include <net/route.h> #include <net/xfrm.h> +static bool ip_may_fragment(const struct sk_buff *skb) +{ + return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) || + !skb->local_df; +} + +static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu) +{ + if (skb->len <= mtu || skb->local_df) + return false; + + if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu) + return false; + + return true; +} + +static bool ip_gso_exceeds_dst_mtu(const struct sk_buff *skb) +{ + unsigned int mtu; + + if (skb->local_df || !skb_is_gso(skb)) + return false; + + mtu = ip_dst_mtu_maybe_forward(skb_dst(skb), true); + + /* if seglen > mtu, do software segmentation for IP fragmentation on + * output. DF bit cannot be set since ip_forward would have sent + * icmp error. 
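To make the effect of these helpers concrete, here is a hypothetical worked case (the numbers are illustrative, not taken from the patch):

    skb->len                    = 45000  (GRO-aggregated TCP skb)
    skb_gso_network_seglen(skb) =  1500  (size of each segment at the network layer)
    egress route MTU            =  1500  -> forwarded unchanged as a GSO skb
    egress route MTU            =  1400  -> ip_forward_finish_gso() segments the skb
                                            in software and sends each piece via dst_output()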
+ */ + return skb_gso_network_seglen(skb) > mtu; +} + +/* called if GSO skb needs to be fragmented on forward */ +static int ip_forward_finish_gso(struct sk_buff *skb) +{ + struct dst_entry *dst = skb_dst(skb); + netdev_features_t features; + struct sk_buff *segs; + int ret = 0; + + features = netif_skb_dev_features(skb, dst->dev); + segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); + if (IS_ERR(segs)) { + kfree_skb(skb); + return -ENOMEM; + } + + consume_skb(skb); + + do { + struct sk_buff *nskb = segs->next; + int err; + + segs->next = NULL; + err = dst_output(segs); + + if (err && ret == 0) + ret = err; + segs = nskb; + } while (segs); + + return ret; +} + static int ip_forward_finish(struct sk_buff *skb) { struct ip_options *opt = &(IPCB(skb)->opt); @@ -49,6 +114,9 @@ static int ip_forward_finish(struct sk_buff *skb) if (unlikely(opt->optlen)) ip_forward_options(skb); + if (ip_gso_exceeds_dst_mtu(skb)) + return ip_forward_finish_gso(skb); + return dst_output(skb); } @@ -92,8 +160,7 @@ int ip_forward(struct sk_buff *skb) IPCB(skb)->flags |= IPSKB_FORWARDED; mtu = ip_dst_mtu_maybe_forward(&rt->dst, true); - if (unlikely(skb->len > mtu && !skb_is_gso(skb) && - (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) { + if (!ip_may_fragment(skb) && ip_exceeds_mtu(skb, mtu)) { IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS); icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index efa1138fa523..b3e86ea7b71b 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -273,7 +273,7 @@ static int __init ic_open_devs(void) msleep(1); - if time_before(jiffies, next_msg) + if (time_before(jiffies, next_msg)) continue; elapsed = jiffies_to_msecs(jiffies - start); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 271554c61276..11e4384daaf9 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1596,6 +1596,7 @@ static int __mkroute_input(struct sk_buff *skb, rth->rt_gateway = 0; rth->rt_uses_gateway = 0; INIT_LIST_HEAD(&rth->rt_uncached); + RT_CACHE_STAT_INC(in_slow_tot); rth->dst.input = ip_forward; rth->dst.output = ip_output; @@ -1694,10 +1695,11 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, fl4.daddr = daddr; fl4.saddr = saddr; err = fib_lookup(net, &fl4, &res); - if (err != 0) + if (err != 0) { + if (!IN_DEV_FORWARD(in_dev)) + err = -EHOSTUNREACH; goto no_route; - - RT_CACHE_STAT_INC(in_slow_tot); + } if (res.type == RTN_BROADCAST) goto brd_input; @@ -1711,8 +1713,10 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, goto local_input; } - if (!IN_DEV_FORWARD(in_dev)) + if (!IN_DEV_FORWARD(in_dev)) { + err = -EHOSTUNREACH; goto no_route; + } if (res.type != RTN_UNICAST) goto martian_destination; @@ -1767,6 +1771,7 @@ local_input: rth->rt_gateway = 0; rth->rt_uses_gateway = 0; INIT_LIST_HEAD(&rth->rt_uncached); + RT_CACHE_STAT_INC(in_slow_tot); if (res.type == RTN_UNREACHABLE) { rth->dst.input= ip_error; rth->dst.error= -err; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index ad235690684c..fdbfeca36d63 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -2783,6 +2783,8 @@ static void addrconf_gre_config(struct net_device *dev) ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0); if (!ipv6_generate_eui64(addr.s6_addr + 8, dev)) addrconf_add_linklocal(idev, &addr); + else + addrconf_prefix_route(&addr, 64, dev, 0, 0); } #endif diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 
ef02b26ccf81..070a2fae2375 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -342,6 +342,20 @@ static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst) return mtu; } +static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu) +{ + if (skb->len <= mtu || skb->local_df) + return false; + + if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu) + return true; + + if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu) + return false; + + return true; +} + int ip6_forward(struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); @@ -466,8 +480,7 @@ int ip6_forward(struct sk_buff *skb) if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU; - if ((!skb->local_df && skb->len > mtu && !skb_is_gso(skb)) || - (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)) { + if (ip6_pkt_too_big(skb, mtu)) { /* Again, force OUTPUT device used as source address */ skb->dev = dst->dev; icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index d6d1f1df9119..ce1c44370610 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1057,7 +1057,8 @@ static void ieee80211_uninit(struct net_device *dev) static u16 ieee80211_netdev_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv) + void *accel_priv, + select_queue_fallback_t fallback) { return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb); } @@ -1075,7 +1076,8 @@ static const struct net_device_ops ieee80211_dataif_ops = { static u16 ieee80211_monitor_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv) + void *accel_priv, + select_queue_fallback_t fallback) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 6a2bb37506c5..48a6a93db296 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -308,11 +308,27 @@ static bool packet_use_direct_xmit(const struct packet_sock *po) return po->xmit == packet_direct_xmit; } -static u16 packet_pick_tx_queue(struct net_device *dev) +static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) { return (u16) raw_smp_processor_id() % dev->real_num_tx_queues; } +static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) +{ + const struct net_device_ops *ops = dev->netdev_ops; + u16 queue_index; + + if (ops->ndo_select_queue) { + queue_index = ops->ndo_select_queue(dev, skb, NULL, + __packet_pick_tx_queue); + queue_index = netdev_cap_txqueue(dev, queue_index); + } else { + queue_index = __packet_pick_tx_queue(dev, skb); + } + + skb_set_queue_mapping(skb, queue_index); +} + /* register_prot_hook must be invoked with the po->bind_lock held, * or from a context in which asynchronous accesses to the packet * socket is not possible (packet_create()). 
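The extra select_queue_fallback_t parameter visible in the mac80211 and af_packet hunks lets a driver defer to the core queue-selection logic for traffic it does not special-case. A hypothetical driver callback might look like the sketch below (the foo_* name is not from this patch):

    #include <linux/netdevice.h>
    #include <linux/pkt_sched.h>

    static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
                                void *accel_priv,
                                select_queue_fallback_t fallback)
    {
            /* Steer control traffic to a dedicated queue; let the core
             * XPS/hash-based selection handle everything else. */
            if (skb->priority == TC_PRIO_CONTROL)
                    return dev->real_num_tx_queues - 1;

            return fallback(dev, skb);
    }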
@@ -2285,7 +2301,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) } } - skb_set_queue_mapping(skb, packet_pick_tx_queue(dev)); + packet_pick_tx_queue(dev, skb); + skb->destructor = tpacket_destruct_skb; __packet_set_status(po, ph, TP_STATUS_SENDING); packet_inc_pending(&po->tx_ring); @@ -2499,7 +2516,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) skb->dev = dev; skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; - skb_set_queue_mapping(skb, packet_pick_tx_queue(dev)); + + packet_pick_tx_queue(dev, skb); if (po->has_vnet_hdr) { if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { @@ -3786,7 +3804,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, */ if (!tx_ring) init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring); - break; + break; default: break; } diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c index a255d0200a59..fefeeb73f15f 100644 --- a/net/sched/sch_pie.c +++ b/net/sched/sch_pie.c @@ -15,6 +15,11 @@ * * ECN support is added by Naeem Khademi <naeemk@ifi.uio.no> * University of Oslo, Norway. + * + * References: + * IETF draft submission: http://tools.ietf.org/html/draft-pan-aqm-pie-00 + * IEEE Conference on High Performance Switching and Routing 2013 : + * "PIE: A * Lightweight Control Scheme to Address the Bufferbloat Problem" */ #include <linux/module.h> @@ -36,7 +41,7 @@ struct pie_params { psched_time_t target; /* user specified target delay in pschedtime */ u32 tupdate; /* timer frequency (in jiffies) */ u32 limit; /* number of packets that can be enqueued */ - u32 alpha; /* alpha and beta are between -4 and 4 */ + u32 alpha; /* alpha and beta are between 0 and 32 */ u32 beta; /* and are used for shift relative to 1 */ bool ecn; /* true if ecn is enabled */ bool bytemode; /* to scale drop early prob based on pkt size */ @@ -326,10 +331,16 @@ static void calculate_probability(struct Qdisc *sch) if (qdelay == 0 && qlen != 0) update_prob = false; - /* Add ranges for alpha and beta, more aggressive for high dropping - * mode and gentle steps for light dropping mode - * In light dropping mode, take gentle steps; in medium dropping mode, - * take medium steps; in high dropping mode, take big steps. + /* In the algorithm, alpha and beta are between 0 and 2 with typical + * value for alpha as 0.125. In this implementation, we use values 0-32 + * passed from user space to represent this. Also, alpha and beta have + * unit of HZ and need to be scaled before they can used to update + * probability. alpha/beta are updated locally below by 1) scaling them + * appropriately 2) scaling down by 16 to come to 0-2 range. + * Please see paper for details. + * + * We scale alpha and beta differently depending on whether we are in + * light, medium or high dropping mode. */ if (q->vars.prob < MAX_PROB / 100) { alpha = diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 5ae609200674..f558433537b8 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -1367,44 +1367,35 @@ static inline bool sctp_peer_needs_update(struct sctp_association *asoc) return false; } -/* Increase asoc's rwnd by len and send any window update SACK if needed. */ -void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len) +/* Update asoc's rwnd for the approximated state in the buffer, + * and check whether SACK needs to be sent. 
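The replacement logic computes the window directly from the socket buffer state instead of incrementally adjusting it. A hypothetical worked example, using an illustrative sk_rcvbuf of 212992 bytes:

    rx_count = 12992               (bytes currently charged for received data)
    rwnd     = (212992 - 12992) >> 1 = 100000
    rx_count >= 212992             -> rwnd = 0 (buffer full, advertise a closed window)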
+ */ +void sctp_assoc_rwnd_update(struct sctp_association *asoc, bool update_peer) { + int rx_count; struct sctp_chunk *sack; struct timer_list *timer; - if (asoc->rwnd_over) { - if (asoc->rwnd_over >= len) { - asoc->rwnd_over -= len; - } else { - asoc->rwnd += (len - asoc->rwnd_over); - asoc->rwnd_over = 0; - } - } else { - asoc->rwnd += len; - } + if (asoc->ep->rcvbuf_policy) + rx_count = atomic_read(&asoc->rmem_alloc); + else + rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc); - /* If we had window pressure, start recovering it - * once our rwnd had reached the accumulated pressure - * threshold. The idea is to recover slowly, but up - * to the initial advertised window. - */ - if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) { - int change = min(asoc->pathmtu, asoc->rwnd_press); - asoc->rwnd += change; - asoc->rwnd_press -= change; - } + if ((asoc->base.sk->sk_rcvbuf - rx_count) > 0) + asoc->rwnd = (asoc->base.sk->sk_rcvbuf - rx_count) >> 1; + else + asoc->rwnd = 0; - pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n", - __func__, asoc, len, asoc->rwnd, asoc->rwnd_over, - asoc->a_rwnd); + pr_debug("%s: asoc:%p rwnd=%u, rx_count=%d, sk_rcvbuf=%d\n", + __func__, asoc, asoc->rwnd, rx_count, + asoc->base.sk->sk_rcvbuf); /* Send a window update SACK if the rwnd has increased by at least the * minimum of the association's PMTU and half of the receive buffer. * The algorithm used is similar to the one described in * Section 4.2.3.3 of RFC 1122. */ - if (sctp_peer_needs_update(asoc)) { + if (update_peer && sctp_peer_needs_update(asoc)) { asoc->a_rwnd = asoc->rwnd; pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u " @@ -1426,45 +1417,6 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len) } } -/* Decrease asoc's rwnd by len. */ -void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len) -{ - int rx_count; - int over = 0; - - if (unlikely(!asoc->rwnd || asoc->rwnd_over)) - pr_debug("%s: association:%p has asoc->rwnd:%u, " - "asoc->rwnd_over:%u!\n", __func__, asoc, - asoc->rwnd, asoc->rwnd_over); - - if (asoc->ep->rcvbuf_policy) - rx_count = atomic_read(&asoc->rmem_alloc); - else - rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc); - - /* If we've reached or overflowed our receive buffer, announce - * a 0 rwnd if rwnd would still be positive. Store the - * the potential pressure overflow so that the window can be restored - * back to original value. - */ - if (rx_count >= asoc->base.sk->sk_rcvbuf) - over = 1; - - if (asoc->rwnd >= len) { - asoc->rwnd -= len; - if (over) { - asoc->rwnd_press += asoc->rwnd; - asoc->rwnd = 0; - } - } else { - asoc->rwnd_over = len - asoc->rwnd; - asoc->rwnd = 0; - } - - pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n", - __func__, asoc, len, asoc->rwnd, asoc->rwnd_over, - asoc->rwnd_press); -} /* Build the bind address list for the association based on info from the * local endpoint and the remote peer. diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 483dcd71b3c5..591b44d3b7de 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -6176,7 +6176,7 @@ static int sctp_eat_data(const struct sctp_association *asoc, * PMTU. In cases, such as loopback, this might be a rather * large spill over. 
*/ - if ((!chunk->data_accepted) && (!asoc->rwnd || asoc->rwnd_over || + if ((!chunk->data_accepted) && (!asoc->rwnd || (datalen > asoc->rwnd + asoc->frag_point))) { /* If this is the next TSN, consider reneging to make diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 9e91d6e5df63..981aaf8b6ace 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -64,6 +64,7 @@ #include <linux/crypto.h> #include <linux/slab.h> #include <linux/file.h> +#include <linux/compat.h> #include <net/ip.h> #include <net/icmp.h> @@ -1368,11 +1369,19 @@ static int sctp_setsockopt_connectx(struct sock *sk, /* * New (hopefully final) interface for the API. * We use the sctp_getaddrs_old structure so that use-space library - * can avoid any unnecessary allocations. The only defferent part + * can avoid any unnecessary allocations. The only different part * is that we store the actual length of the address buffer into the - * addrs_num structure member. That way we can re-use the existing + * addrs_num structure member. That way we can re-use the existing * code. */ +#ifdef CONFIG_COMPAT +struct compat_sctp_getaddrs_old { + sctp_assoc_t assoc_id; + s32 addr_num; + compat_uptr_t addrs; /* struct sockaddr * */ +}; +#endif + static int sctp_getsockopt_connectx3(struct sock *sk, int len, char __user *optval, int __user *optlen) @@ -1381,16 +1390,30 @@ static int sctp_getsockopt_connectx3(struct sock *sk, int len, sctp_assoc_t assoc_id = 0; int err = 0; - if (len < sizeof(param)) - return -EINVAL; +#ifdef CONFIG_COMPAT + if (is_compat_task()) { + struct compat_sctp_getaddrs_old param32; - if (copy_from_user(¶m, optval, sizeof(param))) - return -EFAULT; + if (len < sizeof(param32)) + return -EINVAL; + if (copy_from_user(¶m32, optval, sizeof(param32))) + return -EFAULT; - err = __sctp_setsockopt_connectx(sk, - (struct sockaddr __user *)param.addrs, - param.addr_num, &assoc_id); + param.assoc_id = param32.assoc_id; + param.addr_num = param32.addr_num; + param.addrs = compat_ptr(param32.addrs); + } else +#endif + { + if (len < sizeof(param)) + return -EINVAL; + if (copy_from_user(¶m, optval, sizeof(param))) + return -EFAULT; + } + err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *) + param.addrs, param.addr_num, + &assoc_id); if (err == 0 || err == -EINPROGRESS) { if (copy_to_user(optval, &assoc_id, sizeof(assoc_id))) return -EFAULT; @@ -2092,12 +2115,6 @@ static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, sctp_skb_pull(skb, copied); skb_queue_head(&sk->sk_receive_queue, skb); - /* When only partial message is copied to the user, increase - * rwnd by that amount. If all the data in the skb is read, - * rwnd is updated when the event is freed. 
- */ - if (!sctp_ulpevent_is_notification(event)) - sctp_assoc_rwnd_increase(event->asoc, copied); goto out; } else if ((event->msg_flags & MSG_NOTIFICATION) || (event->msg_flags & MSG_EOR)) diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index 7135e617ab0f..35c8923b5554 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c @@ -151,6 +151,7 @@ static struct ctl_table sctp_net_table[] = { }, { .procname = "cookie_hmac_alg", + .data = &init_net.sctp.sctp_hmac_alg, .maxlen = 8, .mode = 0644, .proc_handler = proc_sctp_do_hmac_alg, @@ -401,15 +402,18 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write, int sctp_sysctl_net_register(struct net *net) { - struct ctl_table *table; - int i; + struct ctl_table *table = sctp_net_table; + + if (!net_eq(net, &init_net)) { + int i; - table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL); - if (!table) - return -ENOMEM; + table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL); + if (!table) + return -ENOMEM; - for (i = 0; table[i].data; i++) - table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp; + for (i = 0; table[i].data; i++) + table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp; + } net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table); return 0; diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index 85c64658bd0b..8d198ae03606 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c @@ -989,7 +989,7 @@ static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event, skb = sctp_event2skb(event); /* Set the owner and charge rwnd for bytes received. */ sctp_ulpevent_set_owner(event, asoc); - sctp_assoc_rwnd_decrease(asoc, skb_headlen(skb)); + sctp_assoc_rwnd_update(asoc, false); if (!skb->data_len) return; @@ -1011,6 +1011,7 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event) { struct sk_buff *skb, *frag; unsigned int len; + struct sctp_association *asoc; /* Current stack structures assume that the rcv buffer is * per socket. 
For UDP style sockets this is not true as @@ -1035,8 +1036,11 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event) } done: - sctp_assoc_rwnd_increase(event->asoc, len); + asoc = event->asoc; + sctp_association_hold(asoc); sctp_ulpevent_release_owner(event); + sctp_assoc_rwnd_update(asoc, true); + sctp_association_put(asoc); } static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event) diff --git a/net/tipc/core.h b/net/tipc/core.h index 1ff477b0450d..5569d96b4da3 100644 --- a/net/tipc/core.h +++ b/net/tipc/core.h @@ -192,6 +192,7 @@ static inline void k_term_timer(struct timer_list *timer) struct tipc_skb_cb { void *handle; + bool deferred; }; #define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0])) diff --git a/net/tipc/link.c b/net/tipc/link.c index e4f233d58d35..284d6383ad6c 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1408,6 +1408,12 @@ static int link_recv_buf_validate(struct sk_buff *buf) u32 hdr_size; u32 min_hdr_size; + /* If this packet comes from the defer queue, the skb has already + * been validated + */ + if (unlikely(TIPC_SKB_CB(buf)->deferred)) + return 1; + if (unlikely(buf->len < MIN_H_SIZE)) return 0; @@ -1717,6 +1723,7 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr, &l_ptr->newest_deferred_in, buf)) { l_ptr->deferred_inqueue_sz++; l_ptr->stats.deferred_recv++; + TIPC_SKB_CB(buf)->deferred = true; if ((l_ptr->deferred_inqueue_sz % 16) == 1) tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); } else diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index 23708636b05c..25e5cb0aaef6 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c @@ -210,8 +210,8 @@ static void do_usb_entry(void *symval, range_lo < 0x9 ? "[%X-9" : "[%X", range_lo); sprintf(alias + strlen(alias), - range_hi > 0xA ? "a-%X]" : "%X]", - range_lo); + range_hi > 0xA ? 
"A-%X]" : "%X]", + range_hi); } } if (bcdDevice_initial_digits < (sizeof(bcdDevice_lo) * 2 - 1)) diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index ec4536c8d8d4..dafcf82139e2 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -932,7 +932,7 @@ int snd_hda_bus_new(struct snd_card *card, } EXPORT_SYMBOL_GPL(snd_hda_bus_new); -#ifdef CONFIG_SND_HDA_GENERIC +#if IS_ENABLED(CONFIG_SND_HDA_GENERIC) #define is_generic_config(codec) \ (codec->modelname && !strcmp(codec->modelname, "generic")) #else @@ -1339,23 +1339,15 @@ get_hda_cvt_setup(struct hda_codec *codec, hda_nid_t nid) /* * Dynamic symbol binding for the codec parsers */ -#ifdef MODULE -#define load_parser_sym(sym) ((int (*)(struct hda_codec *))symbol_request(sym)) -#define unload_parser_addr(addr) symbol_put_addr(addr) -#else -#define load_parser_sym(sym) (sym) -#define unload_parser_addr(addr) do {} while (0) -#endif #define load_parser(codec, sym) \ - ((codec)->parser = load_parser_sym(sym)) + ((codec)->parser = (int (*)(struct hda_codec *))symbol_request(sym)) static void unload_parser(struct hda_codec *codec) { - if (codec->parser) { - unload_parser_addr(codec->parser); - codec->parser = NULL; - } + if (codec->parser) + symbol_put_addr(codec->parser); + codec->parser = NULL; } /* @@ -1570,7 +1562,7 @@ int snd_hda_codec_update_widgets(struct hda_codec *codec) EXPORT_SYMBOL_GPL(snd_hda_codec_update_widgets); -#ifdef CONFIG_SND_HDA_CODEC_HDMI +#if IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI) /* if all audio out widgets are digital, let's assume the codec as a HDMI/DP */ static bool is_likely_hdmi_codec(struct hda_codec *codec) { @@ -1620,12 +1612,20 @@ int snd_hda_codec_configure(struct hda_codec *codec) patch = codec->preset->patch; if (!patch) { unload_parser(codec); /* to be sure */ - if (is_likely_hdmi_codec(codec)) + if (is_likely_hdmi_codec(codec)) { +#if IS_MODULE(CONFIG_SND_HDA_CODEC_HDMI) patch = load_parser(codec, snd_hda_parse_hdmi_codec); -#ifdef CONFIG_SND_HDA_GENERIC - if (!patch) +#elif IS_BUILTIN(CONFIG_SND_HDA_CODEC_HDMI) + patch = snd_hda_parse_hdmi_codec; +#endif + } + if (!patch) { +#if IS_MODULE(CONFIG_SND_HDA_GENERIC) patch = load_parser(codec, snd_hda_parse_generic_codec); +#elif IS_BUILTIN(CONFIG_SND_HDA_GENERIC) + patch = snd_hda_parse_generic_codec; #endif + } if (!patch) { printk(KERN_ERR "hda-codec: No codec parser is available\n"); return -ENODEV; diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 8321a97d5c05..d9a09bdd09db 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -3269,7 +3269,7 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol, mutex_unlock(&codec->control_mutex); snd_hda_codec_flush_cache(codec); /* flush the updates */ if (err >= 0 && spec->cap_sync_hook) - spec->cap_sync_hook(codec, ucontrol); + spec->cap_sync_hook(codec, kcontrol, ucontrol); return err; } @@ -3390,7 +3390,7 @@ static int cap_single_sw_put(struct snd_kcontrol *kcontrol, return ret; if (spec->cap_sync_hook) - spec->cap_sync_hook(codec, ucontrol); + spec->cap_sync_hook(codec, kcontrol, ucontrol); return ret; } @@ -3795,7 +3795,7 @@ static int mux_select(struct hda_codec *codec, unsigned int adc_idx, return 0; snd_hda_activate_path(codec, path, true, false); if (spec->cap_sync_hook) - spec->cap_sync_hook(codec, NULL); + spec->cap_sync_hook(codec, NULL, NULL); path_power_down_sync(codec, old_path); return 1; } @@ -5270,7 +5270,7 @@ static void init_input_src(struct hda_codec *codec) } if (spec->cap_sync_hook) - 
spec->cap_sync_hook(codec, NULL); + spec->cap_sync_hook(codec, NULL, NULL); } /* set right pin controls for digital I/O */ diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h index 07f767231c9f..c908afbe4d94 100644 --- a/sound/pci/hda/hda_generic.h +++ b/sound/pci/hda/hda_generic.h @@ -274,6 +274,7 @@ struct hda_gen_spec { void (*init_hook)(struct hda_codec *codec); void (*automute_hook)(struct hda_codec *codec); void (*cap_sync_hook)(struct hda_codec *codec, + struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol); /* PCM hooks */ diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index fa2879a21a50..e354ab1ec20f 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -198,7 +198,7 @@ MODULE_DESCRIPTION("Intel HDA driver"); #endif #if defined(CONFIG_PM) && defined(CONFIG_VGA_SWITCHEROO) -#ifdef CONFIG_SND_HDA_CODEC_HDMI +#if IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI) #define SUPPORT_VGA_SWITCHEROO #endif #endif diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 4e0ec146553d..bcf91bea3317 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -3291,7 +3291,8 @@ static void cxt_update_headset_mode(struct hda_codec *codec) } static void cxt_update_headset_mode_hook(struct hda_codec *codec, - struct snd_ctl_elem_value *ucontrol) + struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) { cxt_update_headset_mode(codec); } diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index d9693ca9546f..a9a83b85517a 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -708,7 +708,8 @@ static void alc_inv_dmic_sync(struct hda_codec *codec, bool force) } static void alc_inv_dmic_hook(struct hda_codec *codec, - struct snd_ctl_elem_value *ucontrol) + struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) { alc_inv_dmic_sync(codec, false); } @@ -3218,7 +3219,8 @@ static void alc269_fixup_hp_gpio_mute_hook(void *private_data, int enabled) /* turn on/off mic-mute LED per capture hook */ static void alc269_fixup_hp_gpio_mic_mute_hook(struct hda_codec *codec, - struct snd_ctl_elem_value *ucontrol) + struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) { struct alc_spec *spec = codec->spec; unsigned int oldval = spec->gpio_led; @@ -3528,7 +3530,8 @@ static void alc_update_headset_mode(struct hda_codec *codec) } static void alc_update_headset_mode_hook(struct hda_codec *codec, - struct snd_ctl_elem_value *ucontrol) + struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) { alc_update_headset_mode(codec); } @@ -4329,6 +4332,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101), + SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2), SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), @@ -4434,9 +4438,6 @@ static void alc269_fill_coef(struct hda_codec *codec) if (spec->codec_variant != ALC269_TYPE_ALC269VB) return; - /* ALC271X doesn't seem to support these COEFs (bko#52181) */ - if (!strcmp(codec->chip_name, "ALC271X")) - return; if ((alc_get_coef0(codec) & 0x00ff) < 
0x015) { alc_write_coef_idx(codec, 0xf, 0x960b); @@ -5106,6 +5107,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x060a, "Dell XPS 13", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0623, "Dell", ALC668_FIXUP_AUTO_MUTE), SND_PCI_QUIRK(0x1028, 0x0624, "Dell", ALC668_FIXUP_AUTO_MUTE), SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 6998cf29b9bc..7311badf6a94 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -194,7 +194,7 @@ struct sigmatel_spec { int default_polarity; unsigned int mic_mute_led_gpio; /* capture mute LED GPIO */ - bool mic_mute_led_on; /* current mic mute state */ + unsigned int mic_enabled; /* current mic mute state (bitmask) */ /* stream */ unsigned int stream_delay; @@ -324,19 +324,26 @@ static void stac_gpio_set(struct hda_codec *codec, unsigned int mask, /* hook for controlling mic-mute LED GPIO */ static void stac_capture_led_hook(struct hda_codec *codec, - struct snd_ctl_elem_value *ucontrol) + struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) { struct sigmatel_spec *spec = codec->spec; - bool mute; + unsigned int mask; + bool cur_mute, prev_mute; - if (!ucontrol) + if (!kcontrol || !ucontrol) return; - mute = !(ucontrol->value.integer.value[0] || - ucontrol->value.integer.value[1]); - if (spec->mic_mute_led_on != mute) { - spec->mic_mute_led_on = mute; - if (mute) + mask = 1U << snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); + prev_mute = !spec->mic_enabled; + if (ucontrol->value.integer.value[0] || + ucontrol->value.integer.value[1]) + spec->mic_enabled |= mask; + else + spec->mic_enabled &= ~mask; + cur_mute = !spec->mic_enabled; + if (cur_mute != prev_mute) { + if (cur_mute) spec->gpio_data |= spec->mic_mute_led_gpio; else spec->gpio_data &= ~spec->mic_mute_led_gpio; @@ -4462,7 +4469,7 @@ static void stac_setup_gpio(struct hda_codec *codec) if (spec->mic_mute_led_gpio) { spec->gpio_mask |= spec->mic_mute_led_gpio; spec->gpio_dir |= spec->mic_mute_led_gpio; - spec->mic_mute_led_on = true; + spec->mic_enabled = 0; spec->gpio_data |= spec->mic_mute_led_gpio; spec->gen.cap_sync_hook = stac_capture_led_hook; diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c index 5799fbc24c28..8fe3b8c18ed4 100644 --- a/sound/pci/hda/thinkpad_helper.c +++ b/sound/pci/hda/thinkpad_helper.c @@ -39,6 +39,7 @@ static void update_tpacpi_mute_led(void *private_data, int enabled) } static void update_tpacpi_micmute_led(struct hda_codec *codec, + struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { if (!ucontrol || !led_set_func) diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index be456ce264d0..8ca405cd7c1a 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c @@ -24,6 +24,7 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> +#include <linux/uaccess.h> #include <linux/irqchip/arm-gic.h> diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c index 88b2fe3ddf42..00d86427af0f 100644 --- a/virt/kvm/coalesced_mmio.c +++ b/virt/kvm/coalesced_mmio.c @@ -154,17 +154,13 @@ int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm, list_add_tail(&dev->list, 
&kvm->coalesced_zones); mutex_unlock(&kvm->slots_lock); - return ret; + return 0; out_free_dev: mutex_unlock(&kvm->slots_lock); - kfree(dev); - if (dev == NULL) - return -ENXIO; - - return 0; + return ret; } int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,