author		Takashi Iwai <tiwai@suse.de>	2015-12-21 11:46:30 +0100
committer	Takashi Iwai <tiwai@suse.de>	2015-12-21 11:46:30 +0100
commit		1d9d4495001d3c470e5c902ff35a6aa626924fc1 (patch)
tree		1ae39f4ccadb610bc16fea88a1f93b23a4df37b1
parent		ALSA: fm801: store struct device instead of pci_dev (diff)
parent		ALSA: hda - Move audio component accesses to hdac_i915.c (diff)
Merge branch 'topic/hdmi-jack' into for-next
688 files changed, 14177 insertions, 8547 deletions
diff --git a/Documentation/DocBook/gpu.tmpl b/Documentation/DocBook/gpu.tmpl
index 201dcd3c2e9d..03f01e76add7 100644
--- a/Documentation/DocBook/gpu.tmpl
+++ b/Documentation/DocBook/gpu.tmpl
@@ -615,18 +615,6 @@ char *date;</synopsis>
         <function>drm_gem_object_init</function>. Storage for private GEM
         objects must be managed by drivers.
       </para>
-      <para>
-        Drivers that do not need to extend GEM objects with private information
-        can call the <function>drm_gem_object_alloc</function> function to
-        allocate and initialize a struct <structname>drm_gem_object</structname>
-        instance. The GEM core will call the optional driver
-        <methodname>gem_init_object</methodname> operation after initializing
-        the GEM object with <function>drm_gem_object_init</function>.
-        <synopsis>int (*gem_init_object) (struct drm_gem_object *obj);</synopsis>
-      </para>
-      <para>
-        No alloc-and-init function exists for private GEM objects.
-      </para>
     </sect3>
     <sect3>
       <title>GEM Objects Lifetime</title>
@@ -635,10 +623,10 @@ char *date;</synopsis>
         acquired and release by calling <function>drm_gem_object_reference</function>
         and <function>drm_gem_object_unreference</function> respectively. The
         caller must hold the <structname>drm_device</structname>
-        <structfield>struct_mutex</structfield> lock. As a convenience, GEM
-        provides the <function>drm_gem_object_reference_unlocked</function> and
-        <function>drm_gem_object_unreference_unlocked</function> functions that
-        can be called without holding the lock.
+        <structfield>struct_mutex</structfield> lock when calling
+        <function>drm_gem_object_reference</function>. As a convenience, GEM
+        provides <function>drm_gem_object_unreference_unlocked</function>
+        functions that can be called without holding the lock.
       </para>
       <para>
         When the last reference to a GEM object is released the GEM core calls
@@ -649,15 +637,9 @@ char *date;</synopsis>
       </para>
       <para>
         <synopsis>void (*gem_free_object) (struct drm_gem_object *obj);</synopsis>
-        Drivers are responsible for freeing all GEM object resources, including
-        the resources created by the GEM core. If an mmap offset has been
-        created for the object (in which case
-        <structname>drm_gem_object</structname>::<structfield>map_list</structfield>::<structfield>map</structfield>
-        is not NULL) it must be freed by a call to
-        <function>drm_gem_free_mmap_offset</function>. The shmfs backing store
-        must be released by calling <function>drm_gem_object_release</function>
-        (that function can safely be called if no shmfs backing store has been
-        created).
+        Drivers are responsible for freeing all GEM object resources. This includes
+        the resources created by the GEM core, which need to be released with
+        <function>drm_gem_object_release</function>.
       </para>
     </sect3>
     <sect3>
@@ -740,17 +722,10 @@ char *date;</synopsis>
         DRM identifies the GEM object to be mapped by a fake offset passed
         through the mmap offset argument. Prior to being mapped, a GEM object
         must thus be associated with a fake offset. To do so, drivers must call
-        <function>drm_gem_create_mmap_offset</function> on the object. The
-        function allocates a fake offset range from a pool and stores the
-        offset divided by PAGE_SIZE in
-        <literal>obj->map_list.hash.key</literal>. Care must be taken not to
-        call <function>drm_gem_create_mmap_offset</function> if a fake offset
-        has already been allocated for the object. This can be tested by
-        <literal>obj->map_list.map</literal> being non-NULL.
+        <function>drm_gem_create_mmap_offset</function> on the object.
       </para>
       <para>
         Once allocated, the fake offset value
-        (<literal>obj->map_list.hash.key << PAGE_SHIFT</literal>)
         must be passed to the application in a driver-specific way and can then be
         used as the mmap offset argument.
       </para>
@@ -836,10 +811,11 @@ char *date;</synopsis>
         abstracted from the client in libdrm.
       </para>
     </sect3>
-    <sect3>
-      <title>GEM Function Reference</title>
+  </sect2>
+  <sect2>
+    <title>GEM Function Reference</title>
 !Edrivers/gpu/drm/drm_gem.c
-    </sect3>
+!Iinclude/drm/drm_gem.h
   </sect2>
   <sect2>
     <title>VMA Offset Manager</title>
@@ -4201,17 +4177,21 @@ int num_ioctls;</synopsis>
     </sect2>
   </sect1>
   <sect1>
-    <title>GuC-based Command Submission</title>
+    <title>GuC</title>
     <sect2>
-      <title>GuC</title>
+      <title>GuC-specific firmware loader</title>
 !Pdrivers/gpu/drm/i915/intel_guc_loader.c GuC-specific firmware loader
 !Idrivers/gpu/drm/i915/intel_guc_loader.c
     </sect2>
     <sect2>
-      <title>GuC Client</title>
-!Pdrivers/gpu/drm/i915/i915_guc_submission.c GuC-based command submissison
+      <title>GuC-based command submission</title>
+!Pdrivers/gpu/drm/i915/i915_guc_submission.c GuC-based command submission
 !Idrivers/gpu/drm/i915/i915_guc_submission.c
     </sect2>
+    <sect2>
+      <title>GuC Firmware Layout</title>
+!Pdrivers/gpu/drm/i915/intel_guc_fwif.h GuC Firmware Layout
+    </sect2>
   </sect1>
   <sect1>
diff --git a/Documentation/arm/keystone/Overview.txt b/Documentation/arm/keystone/Overview.txt
index f17bc4c9dff9..400c0c270d2e 100644
--- a/Documentation/arm/keystone/Overview.txt
+++ b/Documentation/arm/keystone/Overview.txt
@@ -49,24 +49,6 @@ specified through DTS. Following are the DTS used:-
 The device tree documentation for the keystone machines are located at
     Documentation/devicetree/bindings/arm/keystone/keystone.txt
 
-Known issues & workaround
--------------------------
-
-Some of the device drivers used on keystone are re-used from that from
-DaVinci and other TI SoCs. These device drivers may use clock APIs directly.
-Some of the keystone specific drivers such as netcp uses run time power
-management API instead to enable clock. As this API has limitations on
-keystone, following workaround is needed to boot Linux.
-
-   Add 'clk_ignore_unused' to the bootargs env variable in u-boot. Otherwise
-   clock frameworks will try to disable clocks that are unused and disable
-   the hardware. This is because netcp related power domain and clock
-   domains are enabled in u-boot as run time power management API currently
-   doesn't enable clocks for netcp due to a limitation. This workaround is
-   expected to be removed in the future when proper API support becomes
-   available. Until then, this work around is needed.
-
 Document Author
 ---------------
 Murali Karicheri <m-karicheri2@ti.com>
diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt
index 2f6c6ff7161d..d8880ca30af4 100644
--- a/Documentation/block/null_blk.txt
+++ b/Documentation/block/null_blk.txt
@@ -70,3 +70,6 @@ use_per_node_hctx=[0/1]: Default: 0
      parameter.
   1: The multi-queue block layer is instantiated with a hardware dispatch
      queue for each CPU node in the system.
+
+use_lightnvm=[0/1]: Default: 0
+  Register device with LightNVM. Requires blk-mq to be used.
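The gpu.tmpl hunks at the top of this merge simplify the documented GEM teardown path. As a minimal sketch of what a driver free hook can look like against this 4.4-era API (the foo_gem_object wrapper and its private_data field are hypothetical, not from the patch):

#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_gem.h>

/* Hypothetical driver-private wrapper around a GEM object. */
struct foo_gem_object {
	struct drm_gem_object base;
	void *private_data;		/* driver-managed storage */
};

/*
 * Sketch of a .gem_free_object hook following the updated text: the
 * driver frees its own resources, then hands the rest back to the GEM
 * core.  Both drm_gem_free_mmap_offset() and drm_gem_object_release()
 * are safe to call even if no mmap offset or shmfs backing store was
 * ever created for the object.
 */
static void foo_gem_free_object(struct drm_gem_object *obj)
{
	struct foo_gem_object *foo =
		container_of(obj, struct foo_gem_object, base);

	drm_gem_free_mmap_offset(obj);
	drm_gem_object_release(obj);
	kfree(foo->private_data);
	kfree(foo);
}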
diff --git a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
index f5a8ca29aff0..aeea50c84e92 100644
--- a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
+++ b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
@@ -8,6 +8,11 @@ Required properties:
 - phy-mode: See ethernet.txt file in the same directory
 - clocks: a pointer to the reference clock for this device.
 
+Optional properties:
+- tx-csum-limit: maximum mtu supported by port that allow TX checksum.
+  Value is presented in bytes. If not used, by default 1600B is set for
+  "marvell,armada-370-neta" and 9800B for others.
+
 Example:
 
 ethernet@d0070000 {
@@ -15,6 +20,7 @@ ethernet@d0070000 {
 	reg = <0xd0070000 0x2500>;
 	interrupts = <8>;
 	clocks = <&gate_clk 4>;
+	tx-csum-limit = <9800>;
 	status = "okay";
 	phy = <&phy0>;
 	phy-mode = "rgmii-id";
diff --git a/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt b/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt
index b38200d2583a..0dfa60d88dd3 100644
--- a/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt
@@ -1,7 +1,9 @@
 * Temperature Sensor ADC (TSADC) on rockchip SoCs
 
 Required properties:
-- compatible : "rockchip,rk3288-tsadc"
+- compatible : should be "rockchip,<name>-tsadc"
+   "rockchip,rk3288-tsadc": found on RK3288 SoCs
+   "rockchip,rk3368-tsadc": found on RK3368 SoCs
 - reg : physical base address of the controller and length of memory mapped
   region.
 - interrupts : The interrupt number to the cpu. The interrupt specifier format
diff --git a/MAINTAINERS b/MAINTAINERS
index 050d0e77a2cf..69c8a9c3289a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -318,7 +318,7 @@ M:	Zhang Rui <rui.zhang@intel.com>
 L:	linux-acpi@vger.kernel.org
 W:	https://01.org/linux-acpi
 S:	Supported
-F:	drivers/acpi/video.c
+F:	drivers/acpi/acpi_video.c
 
 ACPI WMI DRIVER
 L:	platform-driver-x86@vger.kernel.org
@@ -1847,7 +1847,7 @@ S:	Supported
 F:	drivers/net/wireless/ath/ath6kl/
 
 WILOCITY WIL6210 WIRELESS DRIVER
-M:	Vladimir Kondratiev <qca_vkondrat@qca.qualcomm.com>
+M:	Maya Erez <qca_merez@qca.qualcomm.com>
 L:	linux-wireless@vger.kernel.org
 L:	wil6210@qca.qualcomm.com
 S:	Supported
@@ -1931,7 +1931,7 @@ S:	Supported
 F:	drivers/i2c/busses/i2c-at91.c
 
 ATMEL ISI DRIVER
-M:	Josh Wu <josh.wu@atmel.com>
+M:	Ludovic Desroches <ludovic.desroches@atmel.com>
 L:	linux-media@vger.kernel.org
 S:	Supported
 F:	drivers/media/platform/soc_camera/atmel-isi.c
@@ -1950,7 +1950,8 @@ S:	Supported
 F:	drivers/net/ethernet/cadence/
 
 ATMEL NAND DRIVER
-M:	Josh Wu <josh.wu@atmel.com>
+M:	Wenyou Yang <wenyou.yang@atmel.com>
+M:	Josh Wu <rainyfeeling@outlook.com>
 L:	linux-mtd@lists.infradead.org
 S:	Supported
 F:	drivers/mtd/nand/atmel_nand*
@@ -6366,6 +6367,7 @@ F:	arch/*/include/asm/pmem.h
 LIGHTNVM PLATFORM SUPPORT
 M:	Matias Bjorling <mb@lightnvm.io>
 W:	http://github/OpenChannelSSD
+L:	linux-block@vger.kernel.org
 S:	Maintained
 F:	drivers/lightnvm/
 F:	include/linux/lightnvm.h
@@ -9425,8 +9427,10 @@ F:	include/scsi/sg.h
 
 SCSI SUBSYSTEM
 M:	"James E.J. Bottomley" <JBottomley@odin.com>
-L:	linux-scsi@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git
+M:	"Martin K. Petersen" <martin.petersen@oracle.com>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git
+L:	linux-scsi@vger.kernel.org
 S:	Maintained
 F:	drivers/scsi/
 F:	include/scsi/
@@ -10901,9 +10905,9 @@ S:	Maintained
 F:	drivers/media/tuners/tua9001*
 
 TULIP NETWORK DRIVERS
-M:	Grant Grundler <grundler@parisc-linux.org>
 L:	netdev@vger.kernel.org
-S:	Maintained
+L:	linux-parisc@vger.kernel.org
+S:	Orphan
 F:	drivers/net/ethernet/dec/tulip/
 
 TUN/TAP driver
diff --git a/Makefile b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
 NAME = Blurry Fish Butt
 
 # *DOCUMENTATION*
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index c92c0ef1e9d2..f1ac9818b751 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index cfac24e0e7b6..323486d6ee83 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index 9922a118a15a..66191cd0447e 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig
index f761a7c70761..f68838e8068a 100644
--- a/arch/arc/configs/nsim_hs_defconfig
+++ b/arch/arc/configs/nsim_hs_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig
index dc6f74f41283..96bd1c20fb0b 100644
--- a/arch/arc/configs/nsim_hs_smp_defconfig
+++ b/arch/arc/configs/nsim_hs_smp_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index 3fef0a210c56..fcae66683ca0 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index 51784837daae..b01b659168ea 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig
index ef35ef3923dd..a07f20de221b 100644
--- a/arch/arc/configs/vdk_hs38_defconfig
+++ b/arch/arc/configs/vdk_hs38_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_CROSS_MEMORY_ATTACH is not set
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
index 634509e5e572..f36c047b33ca 100644
--- a/arch/arc/configs/vdk_hs38_smp_defconfig
+++ b/arch/arc/configs/vdk_hs38_smp_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_CROSS_MEMORY_ATTACH is not set
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
index ad481c24070d..258b0e5ad332 100644
--- a/arch/arc/include/asm/irqflags-arcv2.h
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -37,6 +37,9 @@
 #define ISA_INIT_STATUS_BITS	(STATUS_IE_MASK | STATUS_AD_MASK | \
 					(ARCV2_IRQ_DEF_PRIO << 1))
 
+/* SLEEP needs default irq priority (<=) which can interrupt the doze */
+#define ISA_SLEEP_ARG		(0x10 | ARCV2_IRQ_DEF_PRIO)
+
 #ifndef __ASSEMBLY__
 
 /*
diff --git a/arch/arc/include/asm/irqflags-compact.h b/arch/arc/include/asm/irqflags-compact.h
index d8c608174617..c1d36458bfb7 100644
--- a/arch/arc/include/asm/irqflags-compact.h
+++ b/arch/arc/include/asm/irqflags-compact.h
@@ -43,6 +43,8 @@
 
 #define ISA_INIT_STATUS_BITS	STATUS_IE_MASK
 
+#define ISA_SLEEP_ARG		0x3
+
 #ifndef __ASSEMBLY__
 
 /******************************************************************
diff --git a/arch/arc/kernel/ctx_sw.c b/arch/arc/kernel/ctx_sw.c
index c14a5bea0c76..5d446df2c413 100644
--- a/arch/arc/kernel/ctx_sw.c
+++ b/arch/arc/kernel/ctx_sw.c
@@ -58,8 +58,6 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task)
 		"st      sp, [r24]       \n\t"
 #endif
 
-		"sync   \n\t"
-
 		/*
 		 * setup _current_task with incoming tsk.
 		 * optionally, set r25 to that as well
diff --git a/arch/arc/kernel/ctx_sw_asm.S b/arch/arc/kernel/ctx_sw_asm.S
index e248594097e7..e6890b1f8650 100644
--- a/arch/arc/kernel/ctx_sw_asm.S
+++ b/arch/arc/kernel/ctx_sw_asm.S
@@ -44,9 +44,6 @@ __switch_to:
 	 * don't need to do anything special to return it
 	 */
 
-	/* hardware memory barrier */
-	sync
-
 	/*
 	 * switch to new task, contained in r1
 	 * Temp reg r3 is required to get the ptr to store val
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 91d5a0f1f3f7..a3f750e76b68 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -44,11 +44,10 @@ SYSCALL_DEFINE0(arc_gettls)
 void arch_cpu_idle(void)
 {
 	/* sleep, but enable all interrupts before committing */
-	if (is_isa_arcompact()) {
-		__asm__("sleep 0x3");
-	} else {
-		__asm__("sleep 0x10");
-	}
+	__asm__ __volatile__(
+		"sleep %0	\n"
+		:
+		:"I"(ISA_SLEEP_ARG)); /* can't be "r" has to be embedded const */
 }
 
 asmlinkage void ret_from_fork(void);
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index 93c6ea52b671..7352475451f6 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -986,42 +986,13 @@ int arc_unwind(struct unwind_frame_info *frame)
 					       (const u8 *)(fde + 1) +
 					       *fde, ptrType);
-			if (pc >= endLoc)
+			if (pc >= endLoc) {
 				fde = NULL;
-		} else
-			fde = NULL;
-	}
-	if (fde == NULL) {
-		for (fde = table->address, tableSize = table->size;
-		     cie = NULL, tableSize > sizeof(*fde)
-		     && tableSize - sizeof(*fde) >= *fde;
-		     tableSize -= sizeof(*fde) + *fde,
-		     fde += 1 + *fde / sizeof(*fde)) {
-			cie = cie_for_fde(fde, table);
-			if (cie == &bad_cie) {
 				cie = NULL;
-				break;
 			}
-			if (cie == NULL
-			    || cie == &not_fde
-			    || (ptrType = fde_pointer_type(cie)) < 0)
-				continue;
-			ptr = (const u8 *)(fde + 2);
-			startLoc = read_pointer(&ptr,
-						(const u8 *)(fde + 1) +
-						*fde, ptrType);
-			if (!startLoc)
-				continue;
-			if (!(ptrType & DW_EH_PE_indirect))
-				ptrType &=
-				    DW_EH_PE_FORM | DW_EH_PE_signed;
-			endLoc =
-			    startLoc + read_pointer(&ptr,
-						    (const u8 *)(fde +
-								 1) +
-						    *fde, ptrType);
-			if (pc >= startLoc && pc < endLoc)
-				break;
+		} else {
+			fde = NULL;
+			cie = NULL;
 		}
 	}
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 0ee739846847..daf2bf52b984 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -619,10 +619,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
 		int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
 		if (dirty) {
-			/* wback + inv dcache lines */
+			/* wback + inv dcache lines (K-mapping) */
 			__flush_dcache_page(paddr, paddr);
 
-			/* invalidate any existing icache lines */
+			/* invalidate any existing icache lines (U-mapping) */
 			if (vma->vm_flags & VM_EXEC)
 				__inv_icache_page(paddr, vaddr);
 		}
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 0365cbbc9179..34e1569a11ee 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -76,6 +76,8 @@ config ARM
 	select IRQ_FORCED_THREADING
 	select MODULES_USE_ELF_REL
 	select NO_BOOTMEM
+	select OF_EARLY_FLATTREE if OF
+	select OF_RESERVED_MEM if OF
 	select OLD_SIGACTION
 	select OLD_SIGSUSPEND3
 	select PERF_USE_VMALLOC
@@ -1822,8 +1824,6 @@ config USE_OF
 	bool "Flattened Device Tree support"
 	select IRQ_DOMAIN
 	select OF
-	select OF_EARLY_FLATTREE
-	select OF_RESERVED_MEM
 	help
 	  Include support for flattened device tree machine descriptions.
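Circling back to the marvell-armada-370-neta binding change above: a consumer of the new optional property would read it roughly as below. This is a hedged sketch, not the actual mvneta code; foo_get_tx_csum_limit() is a hypothetical helper, and the 1600/9800 byte defaults are the ones the binding text names.

#include <linux/of.h>

static u32 foo_get_tx_csum_limit(struct device_node *np)
{
	u32 limit;

	/* Use the optional "tx-csum-limit" value if present, otherwise
	 * fall back to the per-compatible default from the binding. */
	if (of_property_read_u32(np, "tx-csum-limit", &limit))
		limit = of_device_is_compatible(np, "marvell,armada-370-neta") ?
			1600 : 9800;
	return limit;
}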
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index d9ba6b879fc1..00352e761b8c 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -604,6 +604,7 @@
 		reg = <0x6f>;
 		interrupts-extended = <&crossbar_mpu GIC_SPI 2 IRQ_TYPE_EDGE_RISING>,
 				      <&dra7_pmx_core 0x424>;
+		interrupt-names = "irq", "wakeup";
 
 		pinctrl-names = "default";
 		pinctrl-0 = <&mcp79410_pins_default>;
diff --git a/arch/arm/boot/dts/animeo_ip.dts b/arch/arm/boot/dts/animeo_ip.dts
index 4e0ad3b82796..0962f2fa3f6e 100644
--- a/arch/arm/boot/dts/animeo_ip.dts
+++ b/arch/arm/boot/dts/animeo_ip.dts
@@ -155,21 +155,21 @@
 			label = "keyswitch_in";
 			gpios = <&pioB 1 GPIO_ACTIVE_HIGH>;
 			linux,code = <28>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		error_in {
 			label = "error_in";
 			gpios = <&pioB 2 GPIO_ACTIVE_HIGH>;
 			linux,code = <29>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		btn {
 			label = "btn";
 			gpios = <&pioC 23 GPIO_ACTIVE_HIGH>;
 			linux,code = <31>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index c6a0e9d7f1a9..e8b7f6726772 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -498,6 +498,7 @@
 				reg = <0x70000 0x4000>;
 				interrupts-extended = <&mpic 8>;
 				clocks = <&gateclk 4>;
+				tx-csum-limit = <9800>;
 				status = "disabled";
 			};
diff --git a/arch/arm/boot/dts/at91-foxg20.dts b/arch/arm/boot/dts/at91-foxg20.dts
index f89598af4c2b..6bf873e7d96c 100644
--- a/arch/arm/boot/dts/at91-foxg20.dts
+++ b/arch/arm/boot/dts/at91-foxg20.dts
@@ -159,7 +159,7 @@
 			label = "Button";
 			gpios = <&pioC 4 GPIO_ACTIVE_LOW>;
 			linux,code = <0x103>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/at91-kizbox.dts b/arch/arm/boot/dts/at91-kizbox.dts
index bf18ece0c027..229e989eb60d 100644
--- a/arch/arm/boot/dts/at91-kizbox.dts
+++ b/arch/arm/boot/dts/at91-kizbox.dts
@@ -24,15 +24,6 @@
 	};
 
 	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		main_clock: clock@0 {
-			compatible = "atmel,osc", "fixed-clock";
-			clock-frequency = <18432000>;
-		};
-
 		main_xtal {
 			clock-frequency = <18432000>;
 		};
@@ -94,14 +85,14 @@
 			label = "PB_RST";
 			gpios = <&pioB 30 GPIO_ACTIVE_HIGH>;
 			linux,code = <0x100>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		user {
 			label = "PB_USER";
 			gpios = <&pioB 31 GPIO_ACTIVE_HIGH>;
 			linux,code = <0x101>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
diff --git a/arch/arm/boot/dts/at91-kizbox2.dts b/arch/arm/boot/dts/at91-kizbox2.dts
index f0b1563cb3f1..50a14568f094 100644
--- a/arch/arm/boot/dts/at91-kizbox2.dts
+++ b/arch/arm/boot/dts/at91-kizbox2.dts
@@ -171,21 +171,21 @@
 			label = "PB_PROG";
 			gpios = <&pioE 27 GPIO_ACTIVE_LOW>;
 			linux,code = <0x102>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		reset {
 			label = "PB_RST";
 			gpios = <&pioE 29 GPIO_ACTIVE_LOW>;
 			linux,code = <0x100>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		user {
 			label = "PB_USER";
 			gpios = <&pioE 31 GPIO_ACTIVE_HIGH>;
 			linux,code = <0x101>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
diff --git a/arch/arm/boot/dts/at91-kizboxmini.dts b/arch/arm/boot/dts/at91-kizboxmini.dts
index 9f72b4932634..9682d105d4d8 100644
--- a/arch/arm/boot/dts/at91-kizboxmini.dts
+++ b/arch/arm/boot/dts/at91-kizboxmini.dts
@@ -98,14 +98,14 @@
 			label = "PB_PROG";
 			gpios = <&pioC 17 GPIO_ACTIVE_LOW>;
 			linux,code = <0x102>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		reset {
 			label = "PB_RST";
 			gpios = <&pioC 16 GPIO_ACTIVE_LOW>;
 			linux,code = <0x100>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
diff --git a/arch/arm/boot/dts/at91-qil_a9260.dts b/arch/arm/boot/dts/at91-qil_a9260.dts
index a9aef53ab764..4f2eebf4a560 100644
--- a/arch/arm/boot/dts/at91-qil_a9260.dts
+++ b/arch/arm/boot/dts/at91-qil_a9260.dts
@@ -183,7 +183,7 @@
 			label = "user_pb";
 			gpios = <&pioB 10 GPIO_ACTIVE_LOW>;
 			linux,code = <28>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
index e07c2b206beb..ad6de73ed5a5 100644
--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
@@ -45,6 +45,7 @@
 /dts-v1/;
 #include "sama5d2.dtsi"
 #include "sama5d2-pinfunc.h"
+#include <dt-bindings/mfd/atmel-flexcom.h>
 
 / {
 	model = "Atmel SAMA5D2 Xplained";
@@ -59,15 +60,6 @@
 	};
 
 	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		main_clock: clock@0 {
-			compatible = "atmel,osc", "fixed-clock";
-			clock-frequency = <12000000>;
-		};
-
 		slow_xtal {
 			clock-frequency = <32768>;
 		};
@@ -91,6 +83,22 @@
 			status = "okay";
 		};
 
+		sdmmc0: sdio-host@a0000000 {
+			bus-width = <8>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_sdmmc0_default>;
+			non-removable;
+			mmc-ddr-1_8v;
+			status = "okay";
+		};
+
+		sdmmc1: sdio-host@b0000000 {
+			bus-width = <4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_sdmmc1_default>;
+			status = "okay"; /* conflict with qspi0 */
+		};
+
 		apb {
 			spi0: spi@f8000000 {
 				pinctrl-names = "default";
@@ -181,12 +189,49 @@
 				};
 			};
 
+			flx0: flexcom@f8034000 {
+				atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_USART>;
+				status = "disabled"; /* conflict with ISC_D2 & ISC_D3 data pins */
+
+				uart5: serial@200 {
+					compatible = "atmel,at91sam9260-usart";
+					reg = <0x200 0x200>;
+					interrupts = <19 IRQ_TYPE_LEVEL_HIGH 7>;
+					clocks = <&flx0_clk>;
+					clock-names = "usart";
+					pinctrl-names = "default";
+					pinctrl-0 = <&pinctrl_flx0_default>;
+					atmel,fifo-size = <32>;
+					status = "okay";
+				};
+			};
+
 			uart3: serial@fc008000 {
 				pinctrl-names = "default";
 				pinctrl-0 = <&pinctrl_uart3_default>;
 				status = "okay";
 			};
 
+			flx4: flexcom@fc018000 {
+				atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_TWI>;
+				status = "okay";
+
+				i2c2: i2c@600 {
+					compatible = "atmel,sama5d2-i2c";
+					reg = <0x600 0x200>;
+					interrupts = <23 IRQ_TYPE_LEVEL_HIGH 7>;
+					dmas = <0>, <0>;
+					dma-names = "tx", "rx";
+					#address-cells = <1>;
+					#size-cells = <0>;
+					clocks = <&flx4_clk>;
+					pinctrl-names = "default";
+					pinctrl-0 = <&pinctrl_flx4_default>;
+					atmel,fifo-size = <16>;
+					status = "okay";
+				};
+			};
+
 			i2c1: i2c@fc028000 {
 				dmas = <0>, <0>;
 				pinctrl-names = "default";
@@ -201,6 +246,18 @@
 			};
 
 			pinctrl@fc038000 {
+				pinctrl_flx0_default: flx0_default {
+					pinmux = <PIN_PB28__FLEXCOM0_IO0>,
+						 <PIN_PB29__FLEXCOM0_IO1>;
+					bias-disable;
+				};
+
+				pinctrl_flx4_default: flx4_default {
+					pinmux = <PIN_PD12__FLEXCOM4_IO0>,
+						 <PIN_PD13__FLEXCOM4_IO1>;
+					bias-disable;
+				};
+
 				pinctrl_i2c0_default: i2c0_default {
 					pinmux = <PIN_PD21__TWD0>,
 						 <PIN_PD22__TWCK0>;
@@ -227,6 +284,46 @@
 					bias-disable;
 				};
 
+				pinctrl_sdmmc0_default: sdmmc0_default {
+					cmd_data {
+						pinmux = <PIN_PA1__SDMMC0_CMD>,
+							 <PIN_PA2__SDMMC0_DAT0>,
+							 <PIN_PA3__SDMMC0_DAT1>,
+							 <PIN_PA4__SDMMC0_DAT2>,
+							 <PIN_PA5__SDMMC0_DAT3>,
+							 <PIN_PA6__SDMMC0_DAT4>,
+							 <PIN_PA7__SDMMC0_DAT5>,
+							 <PIN_PA8__SDMMC0_DAT6>,
+							 <PIN_PA9__SDMMC0_DAT7>;
+						bias-pull-up;
+					};
+
+					ck_cd_rstn_vddsel {
+						pinmux = <PIN_PA0__SDMMC0_CK>,
+							 <PIN_PA10__SDMMC0_RSTN>,
+							 <PIN_PA11__SDMMC0_VDDSEL>,
+							 <PIN_PA13__SDMMC0_CD>;
+						bias-disable;
+					};
+				};
+
+				pinctrl_sdmmc1_default: sdmmc1_default {
+					cmd_data {
+						pinmux = <PIN_PA28__SDMMC1_CMD>,
+							 <PIN_PA18__SDMMC1_DAT0>,
+							 <PIN_PA19__SDMMC1_DAT1>,
+							 <PIN_PA20__SDMMC1_DAT2>,
+							 <PIN_PA21__SDMMC1_DAT3>;
+						bias-pull-up;
+					};
+
+					conf-ck_cd {
+						pinmux = <PIN_PA22__SDMMC1_CK>,
+							 <PIN_PA30__SDMMC1_CD>;
+						bias-disable;
+					};
+				};
+
 				pinctrl_spi0_default: spi0_default {
 					pinmux = <PIN_PA14__SPI0_SPCK>,
 						 <PIN_PA15__SPI0_MOSI>,
diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
index 8488ac53d22d..ff888d21c786 100644
--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
@@ -315,7 +315,7 @@
 			label = "PB_USER";
 			gpios = <&pioE 29 GPIO_ACTIVE_LOW>;
 			linux,code = <0x104>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
index 45371a1b61b3..131614f28e75 100644
--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
@@ -50,7 +50,6 @@
 	compatible = "atmel,sama5d4-xplained", "atmel,sama5d4", "atmel,sama5";
 
 	chosen {
-		bootargs = "ignore_loglevel earlyprintk";
 		stdout-path = "serial0:115200n8";
 	};
 
@@ -59,15 +58,6 @@
 	};
 
 	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		main_clock: clock@0 {
-			compatible = "atmel,osc", "fixed-clock";
-			clock-frequency = <12000000>;
-		};
-
 		slow_xtal {
 			clock-frequency = <32768>;
 		};
@@ -235,7 +225,7 @@
 			label = "pb_user1";
 			gpios = <&pioE 8 GPIO_ACTIVE_HIGH>;
 			linux,code = <0x100>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
diff --git a/arch/arm/boot/dts/at91-sama5d4ek.dts b/arch/arm/boot/dts/at91-sama5d4ek.dts
index 6d272c0125e3..2d4a33100af6 100644
--- a/arch/arm/boot/dts/at91-sama5d4ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d4ek.dts
@@ -50,7 +50,6 @@
 	compatible = "atmel,sama5d4ek", "atmel,sama5d4", "atmel,sama5";
 
 	chosen {
-		bootargs = "ignore_loglevel earlyprintk";
 		stdout-path = "serial0:115200n8";
 	};
 
@@ -59,15 +58,6 @@
 	};
 
 	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		main_clock: clock@0 {
-			compatible = "atmel,osc", "fixed-clock";
-			clock-frequency = <12000000>;
-		};
-
 		slow_xtal {
 			clock-frequency = <32768>;
 		};
@@ -304,7 +294,7 @@
 			label = "pb_user1";
 			gpios = <&pioE 13 GPIO_ACTIVE_HIGH>;
 			linux,code = <0x100>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
diff --git a/arch/arm/boot/dts/at91rm9200ek.dts b/arch/arm/boot/dts/at91rm9200ek.dts
index 8dab4b75ca97..f90e1c2d3caa 100644
--- a/arch/arm/boot/dts/at91rm9200ek.dts
+++ b/arch/arm/boot/dts/at91rm9200ek.dts
@@ -21,15 +21,6 @@
 	};
 
 	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		main_clock: clock@0 {
-			compatible = "atmel,osc", "fixed-clock";
-			clock-frequency = <18432000>;
-		};
-
 		slow_xtal {
 			clock-frequency = <32768>;
 		};
diff --git a/arch/arm/boot/dts/at91sam9261ek.dts b/arch/arm/boot/dts/at91sam9261ek.dts
index 2e92ac020f23..55bd51f07fa6 100644
--- a/arch/arm/boot/dts/at91sam9261ek.dts
+++ b/arch/arm/boot/dts/at91sam9261ek.dts
@@ -22,15 +22,6 @@
 	};
 
 	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		main_clock: clock@0 {
-			compatible = "atmel,osc", "fixed-clock";
-			clock-frequency = <18432000>;
-		};
-
 		slow_xtal {
 			clock-frequency = <32768>;
 		};
@@ -149,7 +140,7 @@
 			ti,debounce-tol = /bits/ 16 <65535>;
 			ti,debounce-max = /bits/ 16 <1>;
 
-			linux,wakeup;
+			wakeup-source;
 		};
 	};
 
@@ -193,28 +184,28 @@
 			label = "button_0";
 			gpios = <&pioA 27 GPIO_ACTIVE_LOW>;
 			linux,code = <256>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		button_1 {
 			label = "button_1";
 			gpios = <&pioA 26 GPIO_ACTIVE_LOW>;
 			linux,code = <257>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		button_2 {
 			label = "button_2";
 			gpios = <&pioA 25 GPIO_ACTIVE_LOW>;
 			linux,code = <258>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		button_3 {
 			label = "button_3";
 			gpios = <&pioA 24 GPIO_ACTIVE_LOW>;
 			linux,code = <259>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/at91sam9263ek.dts b/arch/arm/boot/dts/at91sam9263ek.dts
index 23381276ffb8..59df9d73d276 100644
--- a/arch/arm/boot/dts/at91sam9263ek.dts
+++ b/arch/arm/boot/dts/at91sam9263ek.dts
@@ -22,15 +22,6 @@
 	};
 
 	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		main_clock: clock@0 {
-			compatible = "atmel,osc", "fixed-clock";
-			clock-frequency = <16367660>;
-		};
-
 		slow_xtal {
 			clock-frequency = <32768>;
 		};
@@ -213,14 +204,14 @@
 			label = "left_click";
 			gpios = <&pioC 5 GPIO_ACTIVE_LOW>;
 			linux,code = <272>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		right_click {
 			label = "right_click";
 			gpios = <&pioC 4 GPIO_ACTIVE_LOW>;
 			linux,code = <273>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
index 57548a2c5a1e..e9cc99b6353a 100644
--- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
+++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
@@ -19,15 +19,6 @@
 	};
 
 	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		main_clock: clock@0 {
-			compatible = "atmel,osc", "fixed-clock";
-			clock-frequency = <18432000>;
-		};
-
 		slow_xtal {
 			clock-frequency = <32768>;
 		};
@@ -206,14 +197,14 @@
 			label = "Button 3";
 			gpios = <&pioA 30 GPIO_ACTIVE_LOW>;
 			linux,code = <0x103>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		btn4 {
 			label = "Button 4";
 			gpios = <&pioA 31 GPIO_ACTIVE_LOW>;
 			linux,code = <0x104>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
diff --git a/arch/arm/boot/dts/at91sam9m10g45ek.dts b/arch/arm/boot/dts/at91sam9m10g45ek.dts
index 9d16ef8453c5..2400c99134f7 100644
--- a/arch/arm/boot/dts/at91sam9m10g45ek.dts
+++ b/arch/arm/boot/dts/at91sam9m10g45ek.dts
@@ -24,15 +24,6 @@
 	};
 
 	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		main_clock: clock@0 {
-			compatible = "atmel,osc", "fixed-clock";
-			clock-frequency = <12000000>;
-		};
-
 		slow_xtal {
 			clock-frequency = <32768>;
 		};
@@ -323,14 +314,14 @@
 			label = "left_click";
 			gpios = <&pioB 6 GPIO_ACTIVE_LOW>;
 			linux,code = <272>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		right_click {
 			label = "right_click";
 			gpios = <&pioB 7 GPIO_ACTIVE_LOW>;
 			linux,code = <273>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		left {
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index acf3451a332d..ca4ddf86817a 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -23,15 +23,6 @@
 	};
 
 	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		main_clock: clock@0 {
-			compatible = "atmel,osc", "fixed-clock";
-			clock-frequency = <16000000>;
-		};
-
 		slow_xtal {
 			clock-frequency = <32768>;
 		};
@@ -219,7 +210,7 @@
 			label = "Enter";
 			gpios = <&pioB 3 GPIO_ACTIVE_LOW>;
 			linux,code = <28>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
diff --git a/arch/arm/boot/dts/at91sam9rlek.dts b/arch/arm/boot/dts/at91sam9rlek.dts
index 558c9f220bed..f10566f759cd 100644
--- a/arch/arm/boot/dts/at91sam9rlek.dts
+++ b/arch/arm/boot/dts/at91sam9rlek.dts
@@ -22,15 +22,6 @@
 	};
 
 	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		main_clock: clock {
-			compatible = "atmel,osc", "fixed-clock";
-			clock-frequency = <12000000>;
-		};
-
 		slow_xtal {
 			clock-frequency = <32768>;
 		};
@@ -225,14 +216,14 @@
 			label = "right_click";
 			gpios = <&pioB 0 GPIO_ACTIVE_LOW>;
 			linux,code = <273>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		left_click {
 			label = "left_click";
 			gpios = <&pioB 1 GPIO_ACTIVE_LOW>;
 			linux,code = <272>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
diff --git a/arch/arm/boot/dts/at91sam9x5cm.dtsi b/arch/arm/boot/dts/at91sam9x5cm.dtsi
index 26112ebd15fc..b098ad8cd93a 100644
--- a/arch/arm/boot/dts/at91sam9x5cm.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5cm.dtsi
@@ -13,17 +13,6 @@
 	};
 
 	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		main_clock: clock@0 {
-			compatible = "atmel,osc", "fixed-clock";
-			clock-frequency = <12000000>;
-		};
-	};
-
-	clocks {
 		slow_xtal {
 			clock-frequency = <32768>;
 		};
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index bc672fb91466..fe99231cbde5 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1459,8 +1459,8 @@
 			interrupt-names = "tx", "rx";
 			dmas = <&sdma_xbar 133>, <&sdma_xbar 132>;
 			dma-names = "tx", "rx";
-			clocks = <&mcasp3_ahclkx_mux>;
-			clock-names = "fck";
+			clocks = <&mcasp3_aux_gfclk_mux>, <&mcasp3_ahclkx_mux>;
+			clock-names = "fck", "ahclkx";
 			status = "disabled";
 		};
diff --git a/arch/arm/boot/dts/k2l-netcp.dtsi b/arch/arm/boot/dts/k2l-netcp.dtsi
index 01aef230773d..5acbd0dcc2ab 100644
--- a/arch/arm/boot/dts/k2l-netcp.dtsi
+++ b/arch/arm/boot/dts/k2l-netcp.dtsi
@@ -137,7 +137,7 @@ netcp: netcp@26000000 {
 	/* NetCP address range */
 	ranges = <0 0x26000000 0x1000000>;
 
-	clocks = <&papllclk>, <&clkcpgmac>, <&chipclk12>;
+	clocks = <&clkosr>, <&papllclk>, <&clkcpgmac>, <&chipclk12>;
 	dma-coherent;
 
 	ti,navigator-dmas = <&dma_gbe 0>,
diff --git a/arch/arm/boot/dts/kirkwood-ts219.dtsi b/arch/arm/boot/dts/kirkwood-ts219.dtsi
index c56ab6bbfe3c..0e46560551f4 100644
--- a/arch/arm/boot/dts/kirkwood-ts219.dtsi
+++ b/arch/arm/boot/dts/kirkwood-ts219.dtsi
@@ -40,7 +40,7 @@
 		};
 		poweroff@12100 {
 			compatible = "qnap,power-off";
-			reg = <0x12000 0x100>;
+			reg = <0x12100 0x100>;
 			clocks = <&gate_clk 7>;
 		};
 		spi@10600 {
diff --git a/arch/arm/boot/dts/rk3288-veyron-minnie.dts b/arch/arm/boot/dts/rk3288-veyron-minnie.dts
index 8fd8ef2c72da..85f0373df498 100644
--- a/arch/arm/boot/dts/rk3288-veyron-minnie.dts
+++ b/arch/arm/boot/dts/rk3288-veyron-minnie.dts
@@ -86,6 +86,10 @@
 	};
 };
 
+&emmc {
+	/delete-property/mmc-hs200-1_8v;
+};
+
 &gpio_keys {
 	pinctrl-0 = <&pwr_key_l &ap_lid_int_l &volum_down_l &volum_up_l>;
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index 6a79c9c526b8..04ea209f1737 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -452,8 +452,10 @@
 		clock-names = "tsadc", "apb_pclk";
 		resets = <&cru SRST_TSADC>;
 		reset-names = "tsadc-apb";
-		pinctrl-names = "default";
-		pinctrl-0 = <&otp_out>;
+		pinctrl-names = "init", "default", "sleep";
+		pinctrl-0 = <&otp_gpio>;
+		pinctrl-1 = <&otp_out>;
+		pinctrl-2 = <&otp_gpio>;
 		#thermal-sensor-cells = <1>;
 		rockchip,hw-tshut-temp = <95000>;
 		status = "disabled";
@@ -1395,6 +1397,10 @@
 	};
 
 	tsadc {
+		otp_gpio: otp-gpio {
+			rockchip,pins = <0 10 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+
 		otp_out: otp-out {
 			rockchip,pins = <0 10 RK_FUNC_1 &pcfg_pull_none>;
 		};
diff --git a/arch/arm/boot/dts/sama5d35ek.dts b/arch/arm/boot/dts/sama5d35ek.dts
index d9a9aca1ccfd..e812f5c1bf70 100644
--- a/arch/arm/boot/dts/sama5d35ek.dts
+++ b/arch/arm/boot/dts/sama5d35ek.dts
@@ -49,7 +49,7 @@
 			label = "pb_user1";
 			gpios = <&pioE 27 GPIO_ACTIVE_HIGH>;
 			linux,code = <0x100>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 };
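The long run of DTS hunks above is one mechanical conversion: the gpio_keys-specific gpio-key,wakeup (and linux,wakeup) properties become the generic wakeup-source. On the consumer side the test is a one-liner; a hedged sketch with made-up foo_ naming:

#include <linux/of.h>

/* Sketch: probe-time check for the generic "wakeup-source" property.
 * Drivers whose bindings used gpio-key,wakeup / linux,wakeup before
 * this series typically keep parsing the legacy name as well, for
 * compatibility with old device trees. */
static bool foo_node_is_wakeup_source(struct device_node *np)
{
	return of_property_read_bool(np, "wakeup-source");
}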
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index 15bbaf690047..2193637b9cd2 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -1300,7 +1300,7 @@
 			};
 
 			watchdog@fc068640 {
-				compatible = "atmel,at91sam9260-wdt";
+				compatible = "atmel,sama5d4-wdt";
 				reg = <0xfc068640 0x10>;
 				clocks = <&clk32k>;
 				status = "disabled";
diff --git a/arch/arm/boot/dts/usb_a9260_common.dtsi b/arch/arm/boot/dts/usb_a9260_common.dtsi
index 12edafefd44a..9beea8976584 100644
--- a/arch/arm/boot/dts/usb_a9260_common.dtsi
+++ b/arch/arm/boot/dts/usb_a9260_common.dtsi
@@ -115,7 +115,7 @@
 			label = "user_pb";
 			gpios = <&pioB 10 GPIO_ACTIVE_LOW>;
 			linux,code = <28>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
diff --git a/arch/arm/boot/dts/usb_a9263.dts b/arch/arm/boot/dts/usb_a9263.dts
index 68c0de36c339..8cc6edb29694 100644
--- a/arch/arm/boot/dts/usb_a9263.dts
+++ b/arch/arm/boot/dts/usb_a9263.dts
@@ -143,7 +143,7 @@
 			label = "user_pb";
 			gpios = <&pioB 10 GPIO_ACTIVE_LOW>;
 			linux,code = <28>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
diff --git a/arch/arm/boot/dts/vfxxx.dtsi b/arch/arm/boot/dts/vfxxx.dtsi
index 6736bae43a5b..0d5acc2cdc8e 100644
--- a/arch/arm/boot/dts/vfxxx.dtsi
+++ b/arch/arm/boot/dts/vfxxx.dtsi
@@ -158,7 +158,7 @@
 				interrupts = <67 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clks VF610_CLK_DSPI0>;
 				clock-names = "dspi";
-				spi-num-chipselects = <5>;
+				spi-num-chipselects = <6>;
 				status = "disabled";
 			};
 
@@ -170,7 +170,7 @@
 				interrupts = <68 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clks VF610_CLK_DSPI1>;
 				clock-names = "dspi";
-				spi-num-chipselects = <5>;
+				spi-num-chipselects = <4>;
 				status = "disabled";
 			};
 
@@ -461,6 +461,8 @@
 				clock-names = "adc";
 				#io-channel-cells = <1>;
 				status = "disabled";
+				fsl,adck-max-frequency = <30000000>, <40000000>,
+							<20000000>;
 			};
 
 			esdhc0: esdhc@400b1000 {
@@ -472,8 +474,6 @@
 					<&clks VF610_CLK_ESDHC0>;
 				clock-names = "ipg", "ahb", "per";
 				status = "disabled";
-				fsl,adck-max-frequency = <30000000>, <40000000>,
-							<20000000>;
 			};
 
 			esdhc1: esdhc@400b2000 {
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig
index 1b1e5acd76e2..e4b1be66b3f5 100644
--- a/arch/arm/configs/at91_dt_defconfig
+++ b/arch/arm/configs/at91_dt_defconfig
@@ -125,7 +125,6 @@ CONFIG_POWER_RESET=y
 # CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_AT91SAM9X_WATCHDOG=y
-CONFIG_SSB=m
 CONFIG_MFD_ATMEL_HLCDC=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig
index a0c57ac88b27..63f7e6ce649a 100644
--- a/arch/arm/configs/sama5_defconfig
+++ b/arch/arm/configs/sama5_defconfig
@@ -129,7 +129,6 @@ CONFIG_GPIO_SYSFS=y
 CONFIG_POWER_SUPPLY=y
 CONFIG_POWER_RESET=y
 # CONFIG_HWMON is not set
-CONFIG_SSB=m
 CONFIG_MFD_ATMEL_FLEXCOM=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index be1d07d59ee9..1bd9510de1b9 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -40,6 +40,11 @@ extern void arch_trigger_all_cpu_backtrace(bool);
 #define arch_trigger_all_cpu_backtrace(x) arch_trigger_all_cpu_backtrace(x)
 #endif
 
+static inline int nr_legacy_irqs(void)
+{
+	return NR_IRQS_LEGACY;
+}
+
 #endif
 
 #endif
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index a9c80a2ea1a7..3095df091ff8 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -28,6 +28,18 @@
 unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
 unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
 
+static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
+					 u8 reg_num)
+{
+	return *vcpu_reg(vcpu, reg_num);
+}
+
+static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
+				unsigned long val)
+{
+	*vcpu_reg(vcpu, reg_num) = val;
+}
+
 bool kvm_condition_valid(struct kvm_vcpu *vcpu);
 void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 7a2a32a1d5a8..ede692ffa32e 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -416,6 +416,7 @@
 #define __NR_execveat			(__NR_SYSCALL_BASE+387)
 #define __NR_userfaultfd		(__NR_SYSCALL_BASE+388)
 #define __NR_membarrier			(__NR_SYSCALL_BASE+389)
+#define __NR_mlock2			(__NR_SYSCALL_BASE+390)
 
 /*
  * The following SWIs are ARM private.
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 6551d28c27e6..066f7f9ba411 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -17,11 +17,6 @@
 #include <asm/mach/pci.h>
 
 static int debug_pci;
-static resource_size_t (*align_resource)(struct pci_dev *dev,
-					 const struct resource *res,
-					 resource_size_t start,
-					 resource_size_t size,
-					 resource_size_t align) = NULL;
 
 /*
  * We can't use pci_get_device() here since we are
@@ -461,7 +456,6 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
 		sys->busnr   = busnr;
 		sys->swizzle = hw->swizzle;
 		sys->map_irq = hw->map_irq;
-		align_resource = hw->align_resource;
 		INIT_LIST_HEAD(&sys->resources);
 
 		if (hw->private_data)
@@ -470,6 +464,8 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
 		ret = hw->setup(nr, sys);
 
 		if (ret > 0) {
+			struct pci_host_bridge *host_bridge;
+
 			ret = pcibios_init_resources(nr, sys);
 			if (ret)  {
 				kfree(sys);
@@ -491,6 +487,9 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
 			busnr = sys->bus->busn_res.end + 1;
 
 			list_add(&sys->node, head);
+
+			host_bridge = pci_find_host_bridge(sys->bus);
+			host_bridge->align_resource = hw->align_resource;
 		} else {
 			kfree(sys);
 			if (ret < 0)
@@ -578,14 +577,18 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
 {
 	struct pci_dev *dev = data;
 	resource_size_t start = res->start;
+	struct pci_host_bridge *host_bridge;
 
 	if (res->flags & IORESOURCE_IO && start & 0x300)
 		start = (start + 0x3ff) & ~0x3ff;
 
 	start = (start + align - 1) & ~(align - 1);
 
-	if (align_resource)
-		return align_resource(dev, res, start, size, align);
+	host_bridge = pci_find_host_bridge(dev->bus);
+
+	if (host_bridge->align_resource)
+		return host_bridge->align_resource(dev, res,
+				start, size, align);
 
 	return start;
 }
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index fde6c88d560c..ac368bb068d1 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -399,6 +399,7 @@
 		CALL(sys_execveat)
 		CALL(sys_userfaultfd)
 		CALL(sys_membarrier)
+		CALL(sys_mlock2)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
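The kvm_emulate.h hunk above is the pivot for the *vcpu_reg() conversions that follow: guest GPR accesses go through vcpu_get_reg()/vcpu_set_reg(), so callers no longer depend on vcpu_reg() returning a writable lvalue. A sketch of the calling pattern (the foo_ helpers are illustrative only, mirroring the mmio.c usage below):

#include <asm/kvm_emulate.h>

/* Complete an emulated MMIO read: store the loaded value into the
 * destination register "rt" decoded from the faulting instruction. */
static void foo_complete_mmio_read(struct kvm_vcpu *vcpu, u8 rt,
				   unsigned long data)
{
	vcpu_set_reg(vcpu, rt, data);
}

/* Fetch the value a trapped guest store wants to write out. */
static unsigned long foo_mmio_write_value(struct kvm_vcpu *vcpu, u8 rt)
{
	return vcpu_get_reg(vcpu, rt);
}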
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index eab83b2435b8..e06fd299de08 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -564,17 +564,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 			vcpu_sleep(vcpu);
 
 		/*
-		 * Disarming the background timer must be done in a
-		 * preemptible context, as this call may sleep.
-		 */
-		kvm_timer_flush_hwstate(vcpu);
-
-		/*
 		 * Preparing the interrupts to be injected also
 		 * involves poking the GIC, which must be done in a
 		 * non-preemptible context.
 		 */
 		preempt_disable();
+		kvm_timer_flush_hwstate(vcpu);
 		kvm_vgic_flush_hwstate(vcpu);
 
 		local_irq_disable();
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 974b1c606d04..3a10c9f1d0a4 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -115,7 +115,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
 			       data);
 		data = vcpu_data_host_to_guest(vcpu, data, len);
-		*vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt) = data;
+		vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
 	}
 
 	return 0;
@@ -186,7 +186,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	rt = vcpu->arch.mmio_decode.rt;
 
 	if (is_write) {
-		data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len);
+		data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
+					       len);
 
 		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
 		mmio_write_buf(data_buf, len, data);
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 6984342da13d..61d96a645ff3 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -98,6 +98,11 @@ static void kvm_flush_dcache_pud(pud_t pud)
 	__kvm_flush_dcache_pud(pud);
 }
 
+static bool kvm_is_device_pfn(unsigned long pfn)
+{
+	return !pfn_valid(pfn);
+}
+
 /**
  * stage2_dissolve_pmd() - clear and flush huge PMD entry
  * @kvm:	pointer to kvm structure.
@@ -213,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
 			kvm_tlb_flush_vmid_ipa(kvm, addr);
 
 			/* No need to invalidate the cache for device mappings */
-			if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+			if (!kvm_is_device_pfn(pte_pfn(old_pte)))
 				kvm_flush_dcache_pte(old_pte);
 
 			put_page(virt_to_page(pte));
@@ -305,8 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		if (!pte_none(*pte) &&
-		    (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+		if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
 			kvm_flush_dcache_pte(*pte);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
@@ -1037,11 +1041,6 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
 	return kvm_vcpu_dabt_iswrite(vcpu);
 }
 
-static bool kvm_is_device_pfn(unsigned long pfn)
-{
-	return !pfn_valid(pfn);
-}
-
 /**
  * stage2_wp_ptes - write protect PMD range
  * @pmd:	pointer to pmd entry
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 0b556968a6da..a9b3b905e661 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -75,7 +75,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 	unsigned long context_id;
 	phys_addr_t target_pc;
 
-	cpu_id = *vcpu_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
+	cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
 	if (vcpu_mode_is_32bit(source_vcpu))
 		cpu_id &= ~((u32) 0);
 
@@ -94,8 +94,8 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 		return PSCI_RET_INVALID_PARAMS;
 	}
 
-	target_pc = *vcpu_reg(source_vcpu, 2);
-	context_id = *vcpu_reg(source_vcpu, 3);
+	target_pc = vcpu_get_reg(source_vcpu, 2);
+	context_id = vcpu_get_reg(source_vcpu, 3);
 
 	kvm_reset_vcpu(vcpu);
 
@@ -114,7 +114,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
 	 * the general puspose registers are undefined upon CPU_ON.
 	 */
-	*vcpu_reg(vcpu, 0) = context_id;
+	vcpu_set_reg(vcpu, 0, context_id);
 	vcpu->arch.power_off = false;
 	smp_mb();		/* Make sure the above is visible */
 
@@ -134,8 +134,8 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_vcpu *tmp;
 
-	target_affinity = *vcpu_reg(vcpu, 1);
-	lowest_affinity_level = *vcpu_reg(vcpu, 2);
+	target_affinity = vcpu_get_reg(vcpu, 1);
+	lowest_affinity_level = vcpu_get_reg(vcpu, 2);
 
 	/* Determine target affinity mask */
 	target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
@@ -209,7 +209,7 @@ int kvm_psci_version(struct kvm_vcpu *vcpu)
 static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 {
 	int ret = 1;
-	unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
+	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
 	unsigned long val;
 
 	switch (psci_fn) {
@@ -273,13 +273,13 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 		break;
 	}
 
-	*vcpu_reg(vcpu, 0) = val;
+	vcpu_set_reg(vcpu, 0, val);
 	return ret;
 }
 
 static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 {
-	unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
+	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
 	unsigned long val;
 
 	switch (psci_fn) {
@@ -295,7 +295,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 		break;
 	}
 
-	*vcpu_reg(vcpu, 0) = val;
+	vcpu_set_reg(vcpu, 0, val);
 	return 1;
 }
diff --git a/arch/arm/mach-dove/include/mach/entry-macro.S b/arch/arm/mach-dove/include/mach/entry-macro.S
index 72d622baaad3..df1d44bdc375 100644
--- a/arch/arm/mach-dove/include/mach/entry-macro.S
+++ b/arch/arm/mach-dove/include/mach/entry-macro.S
@@ -18,13 +18,13 @@
 	@ check low interrupts
 	ldr	\irqstat, [\base, #IRQ_CAUSE_LOW_OFF]
 	ldr	\tmp, [\base, #IRQ_MASK_LOW_OFF]
-	mov	\irqnr, #31
+	mov	\irqnr, #32
 	ands	\irqstat, \irqstat, \tmp
 
 	@ if no low interrupts set, check high interrupts
 	ldreq	\irqstat, [\base, #IRQ_CAUSE_HIGH_OFF]
 	ldreq	\tmp, [\base, #IRQ_MASK_HIGH_OFF]
-	moveq	\irqnr, #63
+	moveq	\irqnr, #64
 	andeqs	\irqstat, \irqstat, \tmp
 
 	@ find first active interrupt source
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
index 8e7976a4c3e7..cfc696b972f3 100644
--- a/arch/arm/mach-imx/gpc.c
+++ b/arch/arm/mach-imx/gpc.c
@@ -177,6 +177,7 @@ static struct irq_chip imx_gpc_chip = {
 	.irq_unmask		= imx_gpc_irq_unmask,
 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
 	.irq_set_wake		= imx_gpc_irq_set_wake,
+	.irq_set_type		= irq_chip_set_type_parent,
 #ifdef CONFIG_SMP
 	.irq_set_affinity	= irq_chip_set_affinity_parent,
 #endif
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 5305ec7341ec..79e1f876d1c9 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -143,9 +143,9 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	 * Ensure that CPU power state is set to ON to avoid CPU
 	 * powerdomain transition on wfi
 	 */
-	clkdm_wakeup(cpu1_clkdm);
-	omap_set_pwrdm_state(cpu1_pwrdm, PWRDM_POWER_ON);
-	clkdm_allow_idle(cpu1_clkdm);
+	clkdm_wakeup_nolock(cpu1_clkdm);
+	pwrdm_set_next_pwrst(cpu1_pwrdm, PWRDM_POWER_ON);
+	clkdm_allow_idle_nolock(cpu1_clkdm);
 
 	if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) {
 		while (gic_dist_disabled()) {
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index cc8a987149e2..48495ad82aba 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -890,6 +890,36 @@ static int _init_opt_clks(struct omap_hwmod *oh)
 	return ret;
 }
 
+static void _enable_optional_clocks(struct omap_hwmod *oh)
+{
+	struct omap_hwmod_opt_clk *oc;
+	int i;
+
+	pr_debug("omap_hwmod: %s: enabling optional clocks\n", oh->name);
+
+	for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
+		if (oc->_clk) {
+			pr_debug("omap_hwmod: enable %s:%s\n", oc->role,
+				 __clk_get_name(oc->_clk));
+			clk_enable(oc->_clk);
+		}
+}
+
+static void _disable_optional_clocks(struct omap_hwmod *oh)
+{
+	struct omap_hwmod_opt_clk *oc;
+	int i;
+
+	pr_debug("omap_hwmod: %s: disabling optional clocks\n", oh->name);
+
+	for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
+		if (oc->_clk) {
+			pr_debug("omap_hwmod: disable %s:%s\n", oc->role,
+				 __clk_get_name(oc->_clk));
+			clk_disable(oc->_clk);
+		}
+}
+
 /**
  * _enable_clocks - enable hwmod main clock and interface clocks
  * @oh: struct omap_hwmod *
@@ -917,6 +947,9 @@ static int _enable_clocks(struct omap_hwmod *oh)
 			clk_enable(os->_clk);
 	}
 
+	if (oh->flags & HWMOD_OPT_CLKS_NEEDED)
+		_enable_optional_clocks(oh);
+
 	/* The opt clocks are controlled by the device driver. */
 
 	return 0;
@@ -948,41 +981,14 @@ static int _disable_clocks(struct omap_hwmod *oh)
 			clk_disable(os->_clk);
 	}
 
+	if (oh->flags & HWMOD_OPT_CLKS_NEEDED)
+		_disable_optional_clocks(oh);
+
 	/* The opt clocks are controlled by the device driver. */
 
 	return 0;
 }
 
-static void _enable_optional_clocks(struct omap_hwmod *oh)
-{
-	struct omap_hwmod_opt_clk *oc;
-	int i;
-
-	pr_debug("omap_hwmod: %s: enabling optional clocks\n", oh->name);
-
-	for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
-		if (oc->_clk) {
-			pr_debug("omap_hwmod: enable %s:%s\n", oc->role,
-				 __clk_get_name(oc->_clk));
-			clk_enable(oc->_clk);
-		}
-}
-
-static void _disable_optional_clocks(struct omap_hwmod *oh)
-{
-	struct omap_hwmod_opt_clk *oc;
-	int i;
-
-	pr_debug("omap_hwmod: %s: disabling optional clocks\n", oh->name);
-
-	for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
-		if (oc->_clk) {
-			pr_debug("omap_hwmod: disable %s:%s\n", oc->role,
-				 __clk_get_name(oc->_clk));
-			clk_disable(oc->_clk);
-		}
-}
-
 /**
  * _omap4_enable_module - enable CLKCTRL modulemode on OMAP4
  * @oh: struct omap_hwmod *
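The one-line gpc.c change earlier in this span is the standard fix for a stacked irqchip that forgot to forward a callback: every op the intermediate chip does not handle itself should be routed to the parent chip, otherwise a request for e.g. an edge trigger type cannot propagate down to the GIC. A generic sketch of the pattern (the "FOO" chip is hypothetical; the helpers are the stock kernel ones used by the patch):

#include <linux/irq.h>

/* Hypothetical stacked irqchip: mask/unmask/eoi/type/affinity are all
 * simply delegated to the parent irq domain's chip. */
static struct irq_chip foo_irq_chip = {
	.name			= "FOO",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_type		= irq_chip_set_type_parent,
#ifdef CONFIG_SMP
	.irq_set_affinity	= irq_chip_set_affinity_parent,
#endif
};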
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index ca6df1a73475..76bce11c85a4 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -523,6 +523,8 @@ struct omap_hwmod_omap4_prcm {
  * HWMOD_RECONFIG_IO_CHAIN: omap_hwmod code needs to reconfigure wake-up
  *     events by calling _reconfigure_io_chain() when a device is enabled
  *     or idled.
+ * HWMOD_OPT_CLKS_NEEDED: The optional clocks are needed for the module to
+ *     operate and they need to be handled at the same time as the main_clk.
  */
 #define HWMOD_SWSUP_SIDLE			(1 << 0)
 #define HWMOD_SWSUP_MSTANDBY			(1 << 1)
@@ -538,6 +540,7 @@ struct omap_hwmod_omap4_prcm {
 #define HWMOD_FORCE_MSTANDBY			(1 << 11)
 #define HWMOD_SWSUP_SIDLE_ACT			(1 << 12)
 #define HWMOD_RECONFIG_IO_CHAIN			(1 << 13)
+#define HWMOD_OPT_CLKS_NEEDED			(1 << 14)
 
 /*
  * omap_hwmod._int_flags definitions
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 51d1ecb384bd..ee4e04434a94 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -1298,6 +1298,44 @@ static struct omap_hwmod dra7xx_mcspi4_hwmod = {
 };
 
 /*
+ * 'mcasp' class
+ *
+ */
+
+static struct omap_hwmod_class_sysconfig dra7xx_mcasp_sysc = {
+	.sysc_offs	= 0x0004,
+	.sysc_flags	= SYSC_HAS_SIDLEMODE,
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+	.sysc_fields	= &omap_hwmod_sysc_type3,
+};
+
+static struct omap_hwmod_class dra7xx_mcasp_hwmod_class = {
+	.name	= "mcasp",
+	.sysc	= &dra7xx_mcasp_sysc,
+};
+
+/* mcasp3 */
+static struct omap_hwmod_opt_clk mcasp3_opt_clks[] = {
+	{ .role = "ahclkx", .clk = "mcasp3_ahclkx_mux" },
+};
+
+static struct omap_hwmod dra7xx_mcasp3_hwmod = {
+	.name		= "mcasp3",
+	.class		= &dra7xx_mcasp_hwmod_class,
+	.clkdm_name	= "l4per2_clkdm",
+	.main_clk	= "mcasp3_aux_gfclk_mux",
+	.flags		= HWMOD_OPT_CLKS_NEEDED,
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER2_MCASP3_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER2_MCASP3_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+	.opt_clks	= mcasp3_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(mcasp3_opt_clks),
+};
+
+/*
  * 'mmc' class
  *
  */
@@ -2566,6 +2604,22 @@ static struct omap_hwmod_ocp_if dra7xx_l3_main_1__hdmi = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
+/* l4_per2 -> mcasp3 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per2__mcasp3 = {
+	.master		= &dra7xx_l4_per2_hwmod,
+	.slave		= &dra7xx_mcasp3_hwmod,
+	.clk		= "l4_root_clk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l3_main_1 -> mcasp3 */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_1__mcasp3 = {
+	.master		= &dra7xx_l3_main_1_hwmod,
+	.slave		= &dra7xx_mcasp3_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
 /* l4_per1 -> elm */
 static struct omap_hwmod_ocp_if dra7xx_l4_per1__elm = {
 	.master		= &dra7xx_l4_per1_hwmod,
@@ -3308,6 +3362,8 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
 	&dra7xx_l4_wkup__dcan1,
 	&dra7xx_l4_per2__dcan2,
 	&dra7xx_l4_per2__cpgmac0,
+	&dra7xx_l4_per2__mcasp3,
+	&dra7xx_l3_main_1__mcasp3,
 	&dra7xx_gmac__mdio,
 	&dra7xx_l4_cfg__dma_system,
 	&dra7xx_l3_main_1__dss,
diff --git a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
index b1288f56d509..6256052893ec 100644
--- a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
@@ -144,6 +144,7 @@ static struct omap_hwmod dm81xx_l4_ls_hwmod = {
 	.name		= "l4_ls",
 	.clkdm_name	= "alwon_l3s_clkdm",
 	.class		= &l4_hwmod_class,
+	.flags		= HWMOD_NO_IDLEST,
 };
 
 /*
@@ -155,6 +156,7 @@ static struct omap_hwmod dm81xx_l4_hs_hwmod = {
 	.name		= "l4_hs",
 	.clkdm_name	= "alwon_l3_med_clkdm",
 	.class		= &l4_hwmod_class,
+	.flags		= HWMOD_NO_IDLEST,
 };
 
 /* L3 slow -> L4 ls peripheral interface running at 125MHz */
@@ -850,6 +852,7 @@ static struct omap_hwmod dm816x_emac0_hwmod = {
 	.name		= "emac0",
 	.clkdm_name	= "alwon_ethernet_clkdm",
 	.class		= &dm816x_emac_hwmod_class,
+	.flags		= HWMOD_NO_IDLEST,
 };
 
 static struct omap_hwmod_ocp_if dm81xx_l4_hs__emac0 = {
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 1dfe34654c43..58144779dec4 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -24,9 +24,6 @@
 #include <linux/platform_data/iommu-omap.h>
 #include <linux/platform_data/wkup_m3.h>
 
-#include <asm/siginfo.h>
-#include <asm/signal.h>
-
 #include "common.h"
 #include "common-board-devices.h"
 #include "dss-common.h"
@@ -385,29 +382,6 @@ static void __init omap3_pandora_legacy_init(void)
 }
 #endif /* CONFIG_ARCH_OMAP3 */
 
-#ifdef CONFIG_SOC_TI81XX
-static int fault_fixed_up;
-
-static int t410_abort_handler(unsigned long addr, unsigned int fsr,
-			      struct pt_regs *regs)
-{
-	if ((fsr == 0x406 || fsr == 0xc06) && !fault_fixed_up) {
-		pr_warn("External imprecise Data abort at addr=%#lx, fsr=%#x ignored.\n",
-			addr, fsr);
-		fault_fixed_up = 1;
-		return 0;
-	}
-
-	return 1;
-}
-
-static void __init t410_abort_init(void)
-{
-	hook_fault_code(16 + 6, t410_abort_handler, SIGBUS, BUS_OBJERR,
-			"imprecise external abort");
-}
-#endif
-
 #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5)
 static struct iommu_platform_data omap4_iommu_pdata = {
 	.reset_name = "mmu_cache",
@@ -536,9 +510,6 @@ static struct pdata_init pdata_quirks[] __initdata = {
 	{ "openpandora,omap3-pandora-600mhz", omap3_pandora_legacy_init, },
 	{ "openpandora,omap3-pandora-1ghz", omap3_pandora_legacy_init, },
 #endif
-#ifdef CONFIG_SOC_TI81XX
-	{ "hp,t410", t410_abort_init, },
-#endif
 #ifdef CONFIG_SOC_OMAP5
 	{ "ti,omap5-uevm", omap5_uevm_legacy_init, },
 #endif
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 87b98bf92366..2dbd3785ee6f 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -301,11 +301,11 @@ static void omap3_pm_idle(void)
 	if (omap_irq_pending())
 		return;
 
-	trace_cpu_idle(1, smp_processor_id());
+	trace_cpu_idle_rcuidle(1, smp_processor_id());
 
 	omap_sram_idle();
 
-	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
 #ifdef CONFIG_SUSPEND
diff --git a/arch/arm/mach-orion5x/include/mach/entry-macro.S b/arch/arm/mach-orion5x/include/mach/entry-macro.S
index 79eb502a1e64..73919a36b577 100644
--- a/arch/arm/mach-orion5x/include/mach/entry-macro.S
+++ b/arch/arm/mach-orion5x/include/mach/entry-macro.S
@@ -21,5 +21,5 @@
 	@ find cause bits that are unmasked
 	ands	\irqstat, \irqstat, \tmp	@ clear Z flag if any
 	clzne	\irqnr,	\irqstat		@ calc irqnr
-	rsbne	\irqnr, \irqnr, #31
+	rsbne	\irqnr, \irqnr, #32
 	.endm
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c
index 13eba2b26e0a..8fbfb10047ec 100644
--- a/arch/arm/mach-pxa/palm27x.c
+++ b/arch/arm/mach-pxa/palm27x.c
@@ -344,7 +344,7 @@ void __init palm27x_pwm_init(int bl, int lcd)
 {
 	palm_bl_power	= bl;
 	palm_lcd_power	= lcd;
-	pwm_add_lookup(palm27x_pwm_lookup, ARRAY_SIZE(palm27x_pwm_lookup));
+	pwm_add_table(palm27x_pwm_lookup, ARRAY_SIZE(palm27x_pwm_lookup));
 	platform_device_register(&palm27x_backlight);
 }
 #endif
diff --git a/arch/arm/mach-pxa/palmtc.c b/arch/arm/mach-pxa/palmtc.c
index aebf6de62468..0b5c3876720c 100644
--- a/arch/arm/mach-pxa/palmtc.c
+++ b/arch/arm/mach-pxa/palmtc.c
@@ -169,7 +169,7 @@ static inline void palmtc_keys_init(void) {}
 #if defined(CONFIG_BACKLIGHT_PWM) || defined(CONFIG_BACKLIGHT_PWM_MODULE)
 static struct pwm_lookup palmtc_pwm_lookup[] = {
 	PWM_LOOKUP("pxa25x-pwm.1", 0, "pwm-backlight.0", NULL, PALMTC_PERIOD_NS,
-		   PWM_PERIOD_NORMAL),
+		   PWM_POLARITY_NORMAL),
 };
 
 static struct platform_pwm_backlight_data palmtc_backlight_data = {
diff --git a/arch/arm/mach-shmobile/setup-r8a7793.c b/arch/arm/mach-shmobile/setup-r8a7793.c
index 1d2825cb7a65..5fce87f7f254 100644
--- a/arch/arm/mach-shmobile/setup-r8a7793.c
+++ b/arch/arm/mach-shmobile/setup-r8a7793.c
@@ -19,7 +19,7 @@
 #include "common.h"
 #include "rcar-gen2.h"
 
-static const char *r8a7793_boards_compat_dt[] __initconst = {
+static const char * const r8a7793_boards_compat_dt[] __initconst = {
 	"renesas,r8a7793",
 	NULL,
 };
diff --git a/arch/arm/mach-zx/Kconfig b/arch/arm/mach-zx/Kconfig
index 7fdc5bf24f9b..446334a25cf5 100644
--- a/arch/arm/mach-zx/Kconfig
+++ b/arch/arm/mach-zx/Kconfig
@@ -13,7 +13,7 @@ config SOC_ZX296702
 	select ARM_GLOBAL_TIMER
 	select HAVE_ARM_SCU if SMP
 	select HAVE_ARM_TWD if SMP
-	select PM_GENERIC_DOMAINS
+	select PM_GENERIC_DOMAINS if PM
 	help
 	  Support for ZTE ZX296702 SoC which is a dual core CortexA9MP
endif
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9ac16a482ff1..871f21783866 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -49,7 +49,7 @@ config ARM64
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_BITREVERSE
 	select HAVE_ARCH_JUMP_LABEL
-	select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP
+	select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
@@ -316,6 +316,27 @@ config ARM64_ERRATUM_832075
 
 	  If unsure, say Y.
 
+config ARM64_ERRATUM_834220
+	bool "Cortex-A57: 834220: Stage 2 translation fault might be incorrectly reported in presence of a Stage 1 fault"
+	depends on KVM
+	default y
+	help
+	  This option adds an alternative code sequence to work around ARM
+	  erratum 834220 on Cortex-A57 parts up to r1p2.
+
+	  Affected Cortex-A57 parts might report a Stage 2 translation
+	  fault as the result of a Stage 1 fault for load crossing a
+	  page boundary when there is a permission or device memory
+	  alignment fault at Stage 1 and a translation fault at Stage 2.
+
+	  The workaround is to verify that the Stage 1 translation
+	  doesn't generate a fault before handling the Stage 2 fault.
+	  Please note that this does not necessarily enable the workaround,
+	  as it depends on the alternative framework, which will only patch
+	  the kernel if an affected CPU is detected.
+
+	  If unsure, say Y.
+
 config ARM64_ERRATUM_845719
 	bool "Cortex-A53: 845719: a load might read incorrect data"
 	depends on COMPAT
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 11d5bb0fdd54..8f271b83f910 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -29,8 +29,9 @@
 #define ARM64_HAS_PAN				4
 #define ARM64_HAS_LSE_ATOMICS			5
 #define ARM64_WORKAROUND_CAVIUM_23154		6
+#define ARM64_WORKAROUND_834220			7
 
-#define ARM64_NCAPS				7
+#define ARM64_NCAPS				8
 
 #ifndef __ASSEMBLY__
 
@@ -46,8 +47,12 @@ enum ftr_type {
 #define FTR_STRICT	true	/* SANITY check strict matching required */
 #define FTR_NONSTRICT	false	/* SANITY check ignored */
 
+#define FTR_SIGNED	true	/* Value should be treated as signed */
+#define FTR_UNSIGNED	false	/* Value should be treated as unsigned */
+
 struct arm64_ftr_bits {
-	bool		strict;	  /* CPU Sanity check: strict matching required ?
+	bool		sign;	/* Value is signed ? */
+	bool		strict;	/* CPU Sanity check: strict matching required ?
*/ enum ftr_type type; u8 shift; u8 width; @@ -123,6 +128,18 @@ cpuid_feature_extract_field(u64 features, int field) return cpuid_feature_extract_field_width(features, field, 4); } +static inline unsigned int __attribute_const__ +cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width) +{ + return (u64)(features << (64 - width - field)) >> (64 - width); +} + +static inline unsigned int __attribute_const__ +cpuid_feature_extract_unsigned_field(u64 features, int field) +{ + return cpuid_feature_extract_unsigned_field_width(features, field, 4); +} + static inline u64 arm64_ftr_mask(struct arm64_ftr_bits *ftrp) { return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift); @@ -130,7 +147,9 @@ static inline u64 arm64_ftr_mask(struct arm64_ftr_bits *ftrp) static inline s64 arm64_ftr_value(struct arm64_ftr_bits *ftrp, u64 val) { - return cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width); + return ftrp->sign ? + cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width) : + cpuid_feature_extract_unsigned_field_width(val, ftrp->shift, ftrp->width); } static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0) diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h index e54415ec6935..9732908bfc8a 100644 --- a/arch/arm64/include/asm/hw_breakpoint.h +++ b/arch/arm64/include/asm/hw_breakpoint.h @@ -138,16 +138,18 @@ extern struct pmu perf_ops_bp; /* Determine number of BRP registers available. */ static inline int get_num_brps(void) { + u64 dfr0 = read_system_reg(SYS_ID_AA64DFR0_EL1); return 1 + - cpuid_feature_extract_field(read_system_reg(SYS_ID_AA64DFR0_EL1), + cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_BRPS_SHIFT); } /* Determine number of WRP registers available. */ static inline int get_num_wrps(void) { + u64 dfr0 = read_system_reg(SYS_ID_AA64DFR0_EL1); return 1 + - cpuid_feature_extract_field(read_system_reg(SYS_ID_AA64DFR0_EL1), + cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_WRPS_SHIFT); } diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h index 23eb450b820b..8e8d30684392 100644 --- a/arch/arm64/include/asm/irq.h +++ b/arch/arm64/include/asm/irq.h @@ -7,4 +7,9 @@ struct pt_regs; extern void set_handle_irq(void (*handle_irq)(struct pt_regs *)); +static inline int nr_legacy_irqs(void) +{ + return 0; +} + #endif diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 17e92f05b1fe..25a40213bd9b 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -99,12 +99,22 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT; } -static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num) +/* + * vcpu_get_reg and vcpu_set_reg should always be passed a register number + * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on + * AArch32 with banked registers. + */ +static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu, + u8 reg_num) { - if (vcpu_mode_is_32bit(vcpu)) - return vcpu_reg32(vcpu, reg_num); + return (reg_num == 31) ? 
0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num]; +} - return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num]; +static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num, + unsigned long val) +{ + if (reg_num != 31) + vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val; } /* Get vcpu SPSR for current mode */ diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 24926f2504f7..feb6b4efa641 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -75,6 +75,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = { (1 << MIDR_VARIANT_SHIFT) | 2), }, #endif +#ifdef CONFIG_ARM64_ERRATUM_834220 + { + /* Cortex-A57 r0p0 - r1p2 */ + .desc = "ARM erratum 834220", + .capability = ARM64_WORKAROUND_834220, + MIDR_RANGE(MIDR_CORTEX_A57, 0x00, + (1 << MIDR_VARIANT_SHIFT) | 2), + }, +#endif #ifdef CONFIG_ARM64_ERRATUM_845719 { /* Cortex-A53 r0p[01234] */ diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index c8cf89223b5a..0669c63281ea 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -44,8 +44,9 @@ unsigned int compat_elf_hwcap2 __read_mostly; DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); -#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ +#define __ARM64_FTR_BITS(SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ { \ + .sign = SIGNED, \ .strict = STRICT, \ .type = TYPE, \ .shift = SHIFT, \ @@ -53,6 +54,14 @@ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); .safe_val = SAFE_VAL, \ } +/* Define a feature with signed values */ +#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ + __ARM64_FTR_BITS(FTR_SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) + +/* Define a feature with unsigned value */ +#define U_ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ + __ARM64_FTR_BITS(FTR_UNSIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) + #define ARM64_FTR_END \ { \ .width = 0, \ @@ -99,7 +108,7 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { * Differing PARange is fine as long as all peripherals and memory are mapped * within the minimum PARange of all CPUs */ - ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0), + U_ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0), ARM64_FTR_END, }; @@ -115,18 +124,18 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr1[] = { }; static struct arm64_ftr_bits ftr_ctr[] = { - ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RAO */ + U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RAO */ ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0), - ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */ - ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), /* ERG */ - ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */ + U_ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */ + U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), /* ERG */ + U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */ /* * Linux can handle differing I-cache policies. 
Userspace JITs will * make use of *minLine */ - ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0), /* L1Ip */ + U_ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0), /* L1Ip */ ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0), /* RAZ */ - ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */ + U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */ ARM64_FTR_END, }; @@ -144,12 +153,12 @@ static struct arm64_ftr_bits ftr_id_mmfr0[] = { static struct arm64_ftr_bits ftr_id_aa64dfr0[] = { ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0), - ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6), + U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0), + U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0), + U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0), + U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0), + U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0), + U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6), ARM64_FTR_END, }; diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index fc5508e0df57..4eeb17198cfa 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -127,7 +127,11 @@ static int __init uefi_init(void) table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables; config_tables = early_memremap(efi_to_phys(efi.systab->tables), table_size); - + if (config_tables == NULL) { + pr_warn("Unable to map EFI config table array.\n"); + retval = -ENOMEM; + goto out; + } retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables, sizeof(efi_config_table_64_t), NULL); @@ -209,6 +213,14 @@ void __init efi_init(void) PAGE_ALIGN(params.mmap_size + (params.mmap & ~PAGE_MASK))); memmap.phys_map = params.mmap; memmap.map = early_memremap(params.mmap, params.mmap_size); + if (memmap.map == NULL) { + /* + * If we are booting via UEFI, the UEFI memory map is the only + * description of memory we have, so there is little point in + * proceeding if we cannot access it. 
+ */ + panic("Unable to map EFI memory map.\n"); + } memmap.map_end = memmap.map + params.mmap_size; memmap.desc_size = params.desc_size; memmap.desc_version = params.desc_ver; @@ -227,7 +239,6 @@ static bool __init efi_virtmap_init(void) init_new_context(NULL, &efi_mm); for_each_efi_memory_desc(&memmap, md) { - u64 paddr, npages, size; pgprot_t prot; if (!(md->attribute & EFI_MEMORY_RUNTIME)) @@ -235,11 +246,6 @@ static bool __init efi_virtmap_init(void) if (md->virt_addr == 0) return false; - paddr = md->phys_addr; - npages = md->num_pages; - memrange_efi_to_native(&paddr, &npages); - size = npages << PAGE_SHIFT; - pr_info(" EFI remap 0x%016llx => %p\n", md->phys_addr, (void *)md->virt_addr); @@ -256,7 +262,8 @@ static bool __init efi_virtmap_init(void) else prot = PAGE_KERNEL; - create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, + create_pgd_mapping(&efi_mm, md->phys_addr, md->virt_addr, + md->num_pages << EFI_PAGE_SHIFT, __pgprot(pgprot_val(prot) | PTE_NG)); } return true; @@ -273,12 +280,12 @@ static int __init arm64_enable_runtime_services(void) if (!efi_enabled(EFI_BOOT)) { pr_info("EFI services will not be available.\n"); - return -1; + return 0; } if (efi_runtime_disabled()) { pr_info("EFI runtime services will be disabled.\n"); - return -1; + return 0; } pr_info("Remapping and enabling EFI services.\n"); @@ -288,7 +295,7 @@ static int __init arm64_enable_runtime_services(void) mapsize); if (!memmap.map) { pr_err("Failed to remap EFI memory map\n"); - return -1; + return -ENOMEM; } memmap.map_end = memmap.map + mapsize; efi.memmap = &memmap; @@ -297,13 +304,13 @@ static int __init arm64_enable_runtime_services(void) sizeof(efi_system_table_t)); if (!efi.systab) { pr_err("Failed to remap EFI System Table\n"); - return -1; + return -ENOMEM; } set_bit(EFI_SYSTEM_TABLES, &efi.flags); if (!efi_virtmap_init()) { pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n"); - return -1; + return -ENOMEM; } /* Set up runtime services function pointers */ diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 68a0759b1375..15f0477b0d2a 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -37,7 +37,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) { int ret; - trace_kvm_hvc_arm64(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0), + trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0), kvm_vcpu_hvc_get_imm(vcpu)); ret = kvm_psci_call(vcpu); diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index 1599701ef044..86c289832272 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S @@ -864,6 +864,10 @@ ENTRY(__kvm_flush_vm_context) ENDPROC(__kvm_flush_vm_context) __kvm_hyp_panic: + // Stash PAR_EL1 before corrupting it in __restore_sysregs + mrs x0, par_el1 + push x0, xzr + // Guess the context by looking at VTTBR: // If zero, then we're already a host. // Otherwise restore a minimal host context before panicing. @@ -898,7 +902,7 @@ __kvm_hyp_panic: mrs x3, esr_el2 mrs x4, far_el2 mrs x5, hpfar_el2 - mrs x6, par_el1 + pop x6, xzr // active context PAR_EL1 mrs x7, tpidr_el2 mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ @@ -914,7 +918,7 @@ __kvm_hyp_panic: ENDPROC(__kvm_hyp_panic) __hyp_panic_str: - .ascii "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0" + .ascii "HYP panic:\nPS:%08x PC:%016x ESR:%08x\nFAR:%016x HPFAR:%016x PAR:%016x\nVCPU:%p\n\0" .align 2 @@ -1015,9 +1019,15 @@ el1_trap: b.ne 1f // Not an abort we care about /* This is an abort. 
Check for permission fault */ +alternative_if_not ARM64_WORKAROUND_834220 and x2, x1, #ESR_ELx_FSC_TYPE cmp x2, #FSC_PERM b.ne 1f // Not a permission fault +alternative_else + nop // Use the permission fault path to + nop // check for a valid S1 translation, + nop // regardless of the ESR value. +alternative_endif /* * Check for Stage-1 page table walk, which is guaranteed diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c index 85c57158dcd9..648112e90ed5 100644 --- a/arch/arm64/kvm/inject_fault.c +++ b/arch/arm64/kvm/inject_fault.c @@ -48,7 +48,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset) /* Note: These now point to the banked copies */ *vcpu_spsr(vcpu) = new_spsr_value; - *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset; + *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset; /* Branch to exception vector */ if (sctlr & (1 << 13)) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 87a64e8db04c..d2650e84faf2 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -78,7 +78,7 @@ static u32 get_ccsidr(u32 csselr) * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). */ static bool access_dcsw(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *r) { if (!p->is_write) @@ -94,21 +94,19 @@ static bool access_dcsw(struct kvm_vcpu *vcpu, * sys_regs and leave it in complete control of the caches. */ static bool access_vm_reg(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *r) { - unsigned long val; bool was_enabled = vcpu_has_cache_enabled(vcpu); BUG_ON(!p->is_write); - val = *vcpu_reg(vcpu, p->Rt); if (!p->is_aarch32) { - vcpu_sys_reg(vcpu, r->reg) = val; + vcpu_sys_reg(vcpu, r->reg) = p->regval; } else { if (!p->is_32bit) - vcpu_cp15_64_high(vcpu, r->reg) = val >> 32; - vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL; + vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval); + vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval); } kvm_toggle_cache(vcpu, was_enabled); @@ -122,22 +120,19 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu, * for both AArch64 and AArch32 accesses. 
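The vcpu_get_reg()/vcpu_set_reg() helpers that replace vcpu_reg() in these hunks return and accept values rather than pointers mainly so that register number 31 can follow XZR semantics: reads yield zero and writes vanish. A minimal model of that convention, with an illustrative vcpu layout rather than the kernel's:

#include <assert.h>
#include <stdint.h>

struct vcpu { uint64_t regs[31]; };	/* x0..x30; no storage for xzr */

/* Register 31 always reads as zero (XZR). */
static uint64_t vcpu_get_reg(const struct vcpu *v, int n)
{
	return (n == 31) ? 0 : v->regs[n];
}

/* Writes to register 31 are silently discarded. */
static void vcpu_set_reg(struct vcpu *v, int n, uint64_t val)
{
	if (n != 31)
		v->regs[n] = val;
}

int main(void)
{
	struct vcpu v = { { 0 } };

	vcpu_set_reg(&v, 31, 0xdead);		/* dropped, no out-of-bounds write */
	assert(vcpu_get_reg(&v, 31) == 0);	/* always zero */
	vcpu_set_reg(&v, 0, 42);
	assert(vcpu_get_reg(&v, 0) == 42);
	return 0;
}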
*/ static bool access_gic_sgi(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *r) { - u64 val; - if (!p->is_write) return read_from_write_only(vcpu, p); - val = *vcpu_reg(vcpu, p->Rt); - vgic_v3_dispatch_sgi(vcpu, val); + vgic_v3_dispatch_sgi(vcpu, p->regval); return true; } static bool trap_raz_wi(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *r) { if (p->is_write) @@ -147,19 +142,19 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu, } static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *r) { if (p->is_write) { return ignore_write(vcpu, p); } else { - *vcpu_reg(vcpu, p->Rt) = (1 << 3); + p->regval = (1 << 3); return true; } } static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *r) { if (p->is_write) { @@ -167,7 +162,7 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu, } else { u32 val; asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val)); - *vcpu_reg(vcpu, p->Rt) = val; + p->regval = val; return true; } } @@ -200,17 +195,17 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu, * now use the debug registers. */ static bool trap_debug_regs(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *r) { if (p->is_write) { - vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt); + vcpu_sys_reg(vcpu, r->reg) = p->regval; vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; } else { - *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg); + p->regval = vcpu_sys_reg(vcpu, r->reg); } - trace_trap_reg(__func__, r->reg, p->is_write, *vcpu_reg(vcpu, p->Rt)); + trace_trap_reg(__func__, r->reg, p->is_write, p->regval); return true; } @@ -225,10 +220,10 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu, * hyp.S code switches between host and guest values in future. 
*/ static inline void reg_to_dbg(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, u64 *dbg_reg) { - u64 val = *vcpu_reg(vcpu, p->Rt); + u64 val = p->regval; if (p->is_32bit) { val &= 0xffffffffUL; @@ -240,19 +235,16 @@ static inline void reg_to_dbg(struct kvm_vcpu *vcpu, } static inline void dbg_to_reg(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, u64 *dbg_reg) { - u64 val = *dbg_reg; - + p->regval = *dbg_reg; if (p->is_32bit) - val &= 0xffffffffUL; - - *vcpu_reg(vcpu, p->Rt) = val; + p->regval &= 0xffffffffUL; } static inline bool trap_bvr(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *rd) { u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; @@ -294,7 +286,7 @@ static inline void reset_bvr(struct kvm_vcpu *vcpu, } static inline bool trap_bcr(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *rd) { u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; @@ -337,7 +329,7 @@ static inline void reset_bcr(struct kvm_vcpu *vcpu, } static inline bool trap_wvr(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *rd) { u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; @@ -380,7 +372,7 @@ static inline void reset_wvr(struct kvm_vcpu *vcpu, } static inline bool trap_wcr(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *rd) { u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; @@ -687,7 +679,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { }; static bool trap_dbgidr(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *r) { if (p->is_write) { @@ -697,23 +689,23 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu, u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1); u32 el3 = !!cpuid_feature_extract_field(pfr, ID_AA64PFR0_EL3_SHIFT); - *vcpu_reg(vcpu, p->Rt) = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) | - (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) | - (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) | - (6 << 16) | (el3 << 14) | (el3 << 12)); + p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) | + (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) | + (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) + | (6 << 16) | (el3 << 14) | (el3 << 12)); return true; } } static bool trap_debug32(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *r) { if (p->is_write) { - vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt); + vcpu_cp14(vcpu, r->reg) = p->regval; vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; } else { - *vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg); + p->regval = vcpu_cp14(vcpu, r->reg); } return true; @@ -731,7 +723,7 @@ static bool trap_debug32(struct kvm_vcpu *vcpu, */ static inline bool trap_xvr(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *rd) { u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; @@ -740,12 +732,12 @@ static inline bool trap_xvr(struct kvm_vcpu *vcpu, u64 val = *dbg_reg; val &= 0xffffffffUL; - val |= *vcpu_reg(vcpu, p->Rt) << 32; + val |= p->regval << 32; *dbg_reg = val; vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; } else { - *vcpu_reg(vcpu, p->Rt) = *dbg_reg >> 32; + p->regval = *dbg_reg >> 32; } 
trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg); @@ -991,7 +983,7 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run) * Return 0 if the access has been handled, and -1 if not. */ static int emulate_cp(struct kvm_vcpu *vcpu, - const struct sys_reg_params *params, + struct sys_reg_params *params, const struct sys_reg_desc *table, size_t num) { @@ -1062,12 +1054,12 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu, { struct sys_reg_params params; u32 hsr = kvm_vcpu_get_hsr(vcpu); + int Rt = (hsr >> 5) & 0xf; int Rt2 = (hsr >> 10) & 0xf; params.is_aarch32 = true; params.is_32bit = false; params.CRm = (hsr >> 1) & 0xf; - params.Rt = (hsr >> 5) & 0xf; params.is_write = ((hsr & 1) == 0); params.Op0 = 0; @@ -1076,15 +1068,12 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu, params.CRn = 0; /* - * Massive hack here. Store Rt2 in the top 32bits so we only - * have one register to deal with. As we use the same trap + * Make a 64-bit value out of Rt and Rt2. As we use the same trap * backends between AArch32 and AArch64, we get away with it. */ if (params.is_write) { - u64 val = *vcpu_reg(vcpu, params.Rt); - val &= 0xffffffff; - val |= *vcpu_reg(vcpu, Rt2) << 32; - *vcpu_reg(vcpu, params.Rt) = val; + params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff; + params.regval |= vcpu_get_reg(vcpu, Rt2) << 32; } if (!emulate_cp(vcpu, &params, target_specific, nr_specific)) @@ -1095,11 +1084,10 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu, unhandled_cp_access(vcpu, &params); out: - /* Do the opposite hack for the read side */ + /* Split up the value between registers for the read side */ if (!params.is_write) { - u64 val = *vcpu_reg(vcpu, params.Rt); - val >>= 32; - *vcpu_reg(vcpu, Rt2) = val; + vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval)); + vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval)); } return 1; @@ -1118,21 +1106,24 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu, { struct sys_reg_params params; u32 hsr = kvm_vcpu_get_hsr(vcpu); + int Rt = (hsr >> 5) & 0xf; params.is_aarch32 = true; params.is_32bit = true; params.CRm = (hsr >> 1) & 0xf; - params.Rt = (hsr >> 5) & 0xf; + params.regval = vcpu_get_reg(vcpu, Rt); params.is_write = ((hsr & 1) == 0); params.CRn = (hsr >> 10) & 0xf; params.Op0 = 0; params.Op1 = (hsr >> 14) & 0x7; params.Op2 = (hsr >> 17) & 0x7; - if (!emulate_cp(vcpu, &params, target_specific, nr_specific)) - return 1; - if (!emulate_cp(vcpu, &params, global, nr_global)) + if (!emulate_cp(vcpu, &params, target_specific, nr_specific) || + !emulate_cp(vcpu, &params, global, nr_global)) { + if (!params.is_write) + vcpu_set_reg(vcpu, Rt, params.regval); return 1; + } unhandled_cp_access(vcpu, &params); return 1; @@ -1175,7 +1166,7 @@ int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run) } static int emulate_sys_reg(struct kvm_vcpu *vcpu, - const struct sys_reg_params *params) + struct sys_reg_params *params) { size_t num; const struct sys_reg_desc *table, *r; @@ -1230,6 +1221,8 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run) { struct sys_reg_params params; unsigned long esr = kvm_vcpu_get_hsr(vcpu); + int Rt = (esr >> 5) & 0x1f; + int ret; trace_kvm_handle_sys_reg(esr); @@ -1240,10 +1233,14 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run) params.CRn = (esr >> 10) & 0xf; params.CRm = (esr >> 1) & 0xf; params.Op2 = (esr >> 17) & 0x7; - params.Rt = (esr >> 5) & 0x1f; + params.regval = vcpu_get_reg(vcpu, Rt); params.is_write = !(esr & 1); - return emulate_sys_reg(vcpu,
&params); + + if (!params.is_write) + vcpu_set_reg(vcpu, Rt, params.regval); + return ret; } /****************************************************************************** diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h index eaa324e4db4d..dbbb01cfbee9 100644 --- a/arch/arm64/kvm/sys_regs.h +++ b/arch/arm64/kvm/sys_regs.h @@ -28,7 +28,7 @@ struct sys_reg_params { u8 CRn; u8 CRm; u8 Op2; - u8 Rt; + u64 regval; bool is_write; bool is_aarch32; bool is_32bit; /* Only valid if is_aarch32 is true */ @@ -44,7 +44,7 @@ struct sys_reg_desc { /* Trapped access from guest, if non-NULL. */ bool (*access)(struct kvm_vcpu *, - const struct sys_reg_params *, + struct sys_reg_params *, const struct sys_reg_desc *); /* Initialization for vcpu. */ @@ -77,9 +77,9 @@ static inline bool ignore_write(struct kvm_vcpu *vcpu, } static inline bool read_zero(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p) + struct sys_reg_params *p) { - *vcpu_reg(vcpu, p->Rt) = 0; + p->regval = 0; return true; } diff --git a/arch/arm64/kvm/sys_regs_generic_v8.c b/arch/arm64/kvm/sys_regs_generic_v8.c index 1e4576824165..ed90578fa120 100644 --- a/arch/arm64/kvm/sys_regs_generic_v8.c +++ b/arch/arm64/kvm/sys_regs_generic_v8.c @@ -31,13 +31,13 @@ #include "sys_regs.h" static bool access_actlr(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *r) { if (p->is_write) return ignore_write(vcpu, p); - *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, ACTLR_EL1); + p->regval = vcpu_sys_reg(vcpu, ACTLR_EL1); return true; } diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index f636a2639f03..e87f53ff5f58 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -76,13 +76,28 @@ static void flush_context(unsigned int cpu) __flush_icache_all(); } -static int is_reserved_asid(u64 asid) +static bool check_update_reserved_asid(u64 asid, u64 newasid) { int cpu; - for_each_possible_cpu(cpu) - if (per_cpu(reserved_asids, cpu) == asid) - return 1; - return 0; + bool hit = false; + + /* + * Iterate over the set of reserved ASIDs looking for a match. + * If we find one, then we can update our mm to use newasid + * (i.e. the same ASID in the current generation) but we can't + * exit the loop early, since we need to ensure that all copies + * of the old ASID are updated to reflect the mm. Failure to do + * so could result in us missing the reserved ASID in a future + * generation. + */ + for_each_possible_cpu(cpu) { + if (per_cpu(reserved_asids, cpu) == asid) { + hit = true; + per_cpu(reserved_asids, cpu) = newasid; + } + } + + return hit; } static u64 new_context(struct mm_struct *mm, unsigned int cpu) @@ -92,12 +107,14 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) u64 generation = atomic64_read(&asid_generation); if (asid != 0) { + u64 newasid = generation | (asid & ~ASID_MASK); + /* * If our current ASID was active during a rollover, we * can continue to use it and this was just a false alarm. 
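The rewritten reservation check cannot return on the first hit: every per-cpu copy of the old ASID has to be rewritten to the new-generation value, or a stale copy could be missed after a later rollover. A compact standalone model of that loop, with a fixed CPU count and a plain array standing in for the per-cpu variable:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define NR_CPUS		4
#define ASID_BITS	16	/* low bits: ASID, high bits: generation */

static uint64_t reserved_asids[NR_CPUS];

/*
 * Model of check_update_reserved_asid(): on a hit, every matching
 * per-cpu copy is rewritten to newasid so no stale reservation can
 * be missed in a later generation.
 */
static bool check_update_reserved(uint64_t asid, uint64_t newasid)
{
	bool hit = false;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (reserved_asids[cpu] == asid) {
			hit = true;
			reserved_asids[cpu] = newasid;	/* keep scanning */
		}
	}
	return hit;
}

int main(void)
{
	uint64_t gen1 = 1ULL << ASID_BITS, gen2 = 2ULL << ASID_BITS;

	reserved_asids[0] = reserved_asids[2] = gen1 | 42;
	assert(check_update_reserved(gen1 | 42, gen2 | 42));
	assert(reserved_asids[2] == (gen2 | 42));	/* both copies updated */
	return 0;
}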
*/ - if (is_reserved_asid(asid)) - return generation | (asid & ~ASID_MASK); + if (check_update_reserved_asid(asid, newasid)) + return newasid; /* * We had a valid ASID in a previous life, so try to re-use @@ -105,7 +122,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) */ asid &= ~ASID_MASK; if (!__test_and_set_bit(asid, asid_map)) - goto bump_gen; + return newasid; } /* @@ -129,10 +146,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) set_asid: __set_bit(asid, asid_map); cur_idx = asid; - -bump_gen: - asid |= generation; - return asid; + return asid | generation; } void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 19211c4a8911..92ddac1e8ca2 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -393,16 +393,16 @@ static struct fault_info { { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" }, { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" }, { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, - { do_bad, SIGBUS, 0, "reserved access flag fault" }, + { do_bad, SIGBUS, 0, "unknown 8" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" }, - { do_bad, SIGBUS, 0, "reserved permission fault" }, + { do_bad, SIGBUS, 0, "unknown 12" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" }, { do_bad, SIGBUS, 0, "synchronous external abort" }, - { do_bad, SIGBUS, 0, "asynchronous external abort" }, + { do_bad, SIGBUS, 0, "unknown 17" }, { do_bad, SIGBUS, 0, "unknown 18" }, { do_bad, SIGBUS, 0, "unknown 19" }, { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, @@ -410,16 +410,16 @@ static struct fault_info { { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, { do_bad, SIGBUS, 0, "synchronous parity error" }, - { do_bad, SIGBUS, 0, "asynchronous parity error" }, + { do_bad, SIGBUS, 0, "unknown 25" }, { do_bad, SIGBUS, 0, "unknown 26" }, { do_bad, SIGBUS, 0, "unknown 27" }, - { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, - { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, - { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, - { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, + { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, + { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, + { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, + { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, { do_bad, SIGBUS, 0, "unknown 32" }, { do_bad, SIGBUS, BUS_ADRALN, "alignment fault" }, - { do_bad, SIGBUS, 0, "debug event" }, + { do_bad, SIGBUS, 0, "unknown 34" }, { do_bad, SIGBUS, 0, "unknown 35" }, { do_bad, SIGBUS, 0, "unknown 36" }, { do_bad, SIGBUS, 0, "unknown 37" }, @@ -433,21 +433,21 @@ static struct fault_info { { do_bad, SIGBUS, 0, "unknown 45" }, { do_bad, SIGBUS, 0, "unknown 46" }, { do_bad, SIGBUS, 0, "unknown 47" }, - { do_bad, SIGBUS, 0, "unknown 48" }, + { do_bad, SIGBUS, 0, "TLB conflict abort" }, { 
do_bad, SIGBUS, 0, "unknown 49" }, { do_bad, SIGBUS, 0, "unknown 50" }, { do_bad, SIGBUS, 0, "unknown 51" }, { do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" }, - { do_bad, SIGBUS, 0, "unknown 53" }, + { do_bad, SIGBUS, 0, "implementation fault (unsupported exclusive)" }, { do_bad, SIGBUS, 0, "unknown 54" }, { do_bad, SIGBUS, 0, "unknown 55" }, { do_bad, SIGBUS, 0, "unknown 56" }, { do_bad, SIGBUS, 0, "unknown 57" }, - { do_bad, SIGBUS, 0, "implementation fault (coprocessor abort)" }, + { do_bad, SIGBUS, 0, "unknown 58" }, { do_bad, SIGBUS, 0, "unknown 59" }, { do_bad, SIGBUS, 0, "unknown 60" }, - { do_bad, SIGBUS, 0, "unknown 61" }, - { do_bad, SIGBUS, 0, "unknown 62" }, + { do_bad, SIGBUS, 0, "section domain fault" }, + { do_bad, SIGBUS, 0, "page domain fault" }, { do_bad, SIGBUS, 0, "unknown 63" }, }; diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index abb66f84d4ac..873e363048c6 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -64,8 +64,12 @@ EXPORT_SYMBOL(phys_mem_access_prot); static void __init *early_alloc(unsigned long sz) { - void *ptr = __va(memblock_alloc(sz, sz)); - BUG_ON(!ptr); + phys_addr_t phys; + void *ptr; + + phys = memblock_alloc(sz, sz); + BUG_ON(!phys); + ptr = __va(phys); memset(ptr, 0, sz); return ptr; } @@ -81,55 +85,19 @@ static void split_pmd(pmd_t *pmd, pte_t *pte) do { /* * Need to have the least restrictive permissions available - * permissions will be fixed up later. Default the new page - * range as contiguous ptes. + * permissions will be fixed up later */ - set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC_CONT)); + set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC)); pfn++; } while (pte++, i++, i < PTRS_PER_PTE); } -/* - * Given a PTE with the CONT bit set, determine where the CONT range - * starts, and clear the entire range of PTE CONT bits. - */ -static void clear_cont_pte_range(pte_t *pte, unsigned long addr) -{ - int i; - - pte -= CONT_RANGE_OFFSET(addr); - for (i = 0; i < CONT_PTES; i++) { - set_pte(pte, pte_mknoncont(*pte)); - pte++; - } - flush_tlb_all(); -} - -/* - * Given a range of PTEs set the pfn and provided page protection flags - */ -static void __populate_init_pte(pte_t *pte, unsigned long addr, - unsigned long end, phys_addr_t phys, - pgprot_t prot) -{ - unsigned long pfn = __phys_to_pfn(phys); - - do { - /* clear all the bits except the pfn, then apply the prot */ - set_pte(pte, pfn_pte(pfn, prot)); - pte++; - pfn++; - addr += PAGE_SIZE; - } while (addr != end); -} - static void alloc_init_pte(pmd_t *pmd, unsigned long addr, - unsigned long end, phys_addr_t phys, + unsigned long end, unsigned long pfn, pgprot_t prot, void *(*alloc)(unsigned long size)) { pte_t *pte; - unsigned long next; if (pmd_none(*pmd) || pmd_sect(*pmd)) { pte = alloc(PTRS_PER_PTE * sizeof(pte_t)); @@ -142,27 +110,9 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr, pte = pte_offset_kernel(pmd, addr); do { - next = min(end, (addr + CONT_SIZE) & CONT_MASK); - if (((addr | next | phys) & ~CONT_MASK) == 0) { - /* a block of CONT_PTES */ - __populate_init_pte(pte, addr, next, phys, - __pgprot(pgprot_val(prot) | PTE_CONT)); - } else { - /* - * If the range being split is already inside of a - * contiguous range but this PTE isn't going to be - * contiguous, then we want to unmark the adjacent - * ranges, then update the portion of the range we - * are interrested in. 
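The early_alloc() change in the mmu.c hunk above fixes an ineffective sanity check: the old code translated the allocation through __va() first and then tested the pointer, but __va(0) is not NULL, so an allocation failure could never trip the BUG_ON(). A toy sketch of the corrected ordering; phys_addr_t, __va(), BUG_ON() and memblock_alloc() here are simplified stand-ins, not the kernel definitions:

#include <assert.h>
#include <stdint.h>
#include <string.h>

typedef uint64_t phys_addr_t;

#define PAGE_OFFSET	0x1000UL	/* toy linear-map offset */
#define __va(x)		((void *)(uintptr_t)((x) + PAGE_OFFSET))
#define BUG_ON(x)	assert(!(x))

static unsigned char pool[4096];	/* toy "physical" memory */

/* Toy allocator: returns a physical address, or 0 on failure. */
static phys_addr_t memblock_alloc(unsigned long size, unsigned long align)
{
	(void)align;
	return size <= sizeof(pool) ? (phys_addr_t)((uintptr_t)pool - PAGE_OFFSET) : 0;
}

/*
 * The fixed ordering: test the physical address, then translate.
 * Testing __va(phys) instead would never catch failure, because
 * __va(0) is a valid-looking non-NULL pointer.
 */
static void *early_alloc(unsigned long sz)
{
	phys_addr_t phys = memblock_alloc(sz, sz);
	void *ptr;

	BUG_ON(!phys);
	ptr = __va(phys);
	memset(ptr, 0, sz);
	return ptr;
}

int main(void)
{
	return early_alloc(64) ? 0 : 1;
}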
- */ - clear_cont_pte_range(pte, addr); - __populate_init_pte(pte, addr, next, phys, prot); - } - - pte += (next - addr) >> PAGE_SHIFT; - phys += next - addr; - addr = next; - } while (addr != end); + set_pte(pte, pfn_pte(pfn, prot)); + pfn++; + } while (pte++, addr += PAGE_SIZE, addr != end); } static void split_pud(pud_t *old_pud, pmd_t *pmd) @@ -223,7 +173,8 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud, } } } else { - alloc_init_pte(pmd, addr, next, phys, prot, alloc); + alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys), + prot, alloc); } phys += next - addr; } while (pmd++, addr = next, addr != end); diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index d6a53ef2350b..b162ad70effc 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -139,6 +139,12 @@ static inline int epilogue_offset(const struct jit_ctx *ctx) /* Stack must be multiples of 16B */ #define STACK_ALIGN(sz) (((sz) + 15) & ~15) +#define _STACK_SIZE \ + (MAX_BPF_STACK \ + + 4 /* extra for skb_copy_bits buffer */) + +#define STACK_SIZE STACK_ALIGN(_STACK_SIZE) + static void build_prologue(struct jit_ctx *ctx) { const u8 r6 = bpf2a64[BPF_REG_6]; @@ -150,10 +156,6 @@ static void build_prologue(struct jit_ctx *ctx) const u8 rx = bpf2a64[BPF_REG_X]; const u8 tmp1 = bpf2a64[TMP_REG_1]; const u8 tmp2 = bpf2a64[TMP_REG_2]; - int stack_size = MAX_BPF_STACK; - - stack_size += 4; /* extra for skb_copy_bits buffer */ - stack_size = STACK_ALIGN(stack_size); /* * BPF prog stack layout @@ -165,12 +167,13 @@ static void build_prologue(struct jit_ctx *ctx) * | ... | callee saved registers * +-----+ * | | x25/x26 - * BPF fp register => -80:+-----+ + * BPF fp register => -80:+-----+ <= (BPF_FP) * | | * | ... | BPF prog stack * | | - * | | - * current A64_SP => +-----+ + * +-----+ <= (BPF_FP - MAX_BPF_STACK) + * |RSVD | JIT scratchpad + * current A64_SP => +-----+ <= (BPF_FP - STACK_SIZE) * | | * | ... 
| Function call stack * | | @@ -196,7 +199,7 @@ static void build_prologue(struct jit_ctx *ctx) emit(A64_MOV(1, fp, A64_SP), ctx); /* Set up function call stack */ - emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx); + emit(A64_SUB_I(1, A64_SP, A64_SP, STACK_SIZE), ctx); /* Clear registers A and X */ emit_a64_mov_i64(ra, 0, ctx); @@ -213,13 +216,9 @@ static void build_epilogue(struct jit_ctx *ctx) const u8 fp = bpf2a64[BPF_REG_FP]; const u8 tmp1 = bpf2a64[TMP_REG_1]; const u8 tmp2 = bpf2a64[TMP_REG_2]; - int stack_size = MAX_BPF_STACK; - - stack_size += 4; /* extra for skb_copy_bits buffer */ - stack_size = STACK_ALIGN(stack_size); /* We're done with BPF stack */ - emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx); + emit(A64_ADD_I(1, A64_SP, A64_SP, STACK_SIZE), ctx); /* Restore fs (x25) and x26 */ emit(A64_POP(fp, A64_R(26), A64_SP), ctx); @@ -591,7 +590,25 @@ emit_cond_jmp: case BPF_ST | BPF_MEM | BPF_H: case BPF_ST | BPF_MEM | BPF_B: case BPF_ST | BPF_MEM | BPF_DW: - goto notyet; + /* Load imm to a register then store it */ + ctx->tmp_used = 1; + emit_a64_mov_i(1, tmp2, off, ctx); + emit_a64_mov_i(1, tmp, imm, ctx); + switch (BPF_SIZE(code)) { + case BPF_W: + emit(A64_STR32(tmp, dst, tmp2), ctx); + break; + case BPF_H: + emit(A64_STRH(tmp, dst, tmp2), ctx); + break; + case BPF_B: + emit(A64_STRB(tmp, dst, tmp2), ctx); + break; + case BPF_DW: + emit(A64_STR64(tmp, dst, tmp2), ctx); + break; + } + break; /* STX: *(size *)(dst + off) = src */ case BPF_STX | BPF_MEM | BPF_W: @@ -658,7 +675,7 @@ emit_cond_jmp: return -EINVAL; } emit_a64_mov_i64(r3, size, ctx); - emit(A64_ADD_I(1, r4, fp, MAX_BPF_STACK), ctx); + emit(A64_SUB_I(1, r4, fp, STACK_SIZE), ctx); emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx); emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx); emit(A64_MOV(1, A64_FP, A64_SP), ctx); diff --git a/arch/m68k/coldfire/m54xx.c b/arch/m68k/coldfire/m54xx.c index f7836c6a6b60..c32f76791f48 100644 --- a/arch/m68k/coldfire/m54xx.c +++ b/arch/m68k/coldfire/m54xx.c @@ -98,7 +98,7 @@ static void __init mcf54xx_bootmem_alloc(void) memstart = PAGE_ALIGN(_ramstart); min_low_pfn = PFN_DOWN(_rambase); start_pfn = PFN_DOWN(memstart); - max_low_pfn = PFN_DOWN(_ramend); + max_pfn = max_low_pfn = PFN_DOWN(_ramend); high_memory = (void *)_ramend; m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6; diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 0793a7f17417..f9d96bf86910 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h @@ -4,7 +4,7 @@ #include <uapi/asm/unistd.h> -#define NR_syscalls 375 +#define NR_syscalls 376 #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_STAT diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h index 5e6fae6c275f..36cf129de663 100644 --- a/arch/m68k/include/uapi/asm/unistd.h +++ b/arch/m68k/include/uapi/asm/unistd.h @@ -380,5 +380,6 @@ #define __NR_sendmmsg 372 #define __NR_userfaultfd 373 #define __NR_membarrier 374 +#define __NR_mlock2 375 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */ diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c index 88c27d94a721..76b9113f3092 100644 --- a/arch/m68k/kernel/setup_no.c +++ b/arch/m68k/kernel/setup_no.c @@ -238,11 +238,14 @@ void __init setup_arch(char **cmdline_p) * Give all the memory to the bootmap allocator, tell it to put the * boot mem_map at the start of memory. 
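With the size folded into a compile-time STACK_SIZE constant, the 16-byte rounding in the BPF JIT hunks above is easy to sanity-check in isolation. A tiny standalone version of the arithmetic, assuming MAX_BPF_STACK is 512 as in kernels of this era:

#include <stdio.h>

#define MAX_BPF_STACK	512			/* assumed value */
#define STACK_ALIGN(sz)	(((sz) + 15) & ~15)	/* stack must be multiples of 16B */

#define _STACK_SIZE	(MAX_BPF_STACK + 4)	/* + skb_copy_bits scratch */
#define STACK_SIZE	STACK_ALIGN(_STACK_SIZE)

int main(void)
{
	/* 516 rounds up to 528, keeping A64_SP 16-byte aligned. */
	printf("STACK_SIZE = %d\n", STACK_SIZE);
	return 0;
}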
*/ + min_low_pfn = PFN_DOWN(memory_start); + max_pfn = max_low_pfn = PFN_DOWN(memory_end); + bootmap_size = init_bootmem_node( NODE_DATA(0), - memory_start >> PAGE_SHIFT, /* map goes here */ - PAGE_OFFSET >> PAGE_SHIFT, /* 0 on coldfire */ - memory_end >> PAGE_SHIFT); + min_low_pfn, /* map goes here */ + PFN_DOWN(PAGE_OFFSET), + max_pfn); /* * Free the usable memory, we have to make sure we do not free * the bootmem bitmap so we then reserve it after freeing it :-) diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index 5dd0e80042f5..282cd903f4c4 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S @@ -395,3 +395,4 @@ ENTRY(sys_call_table) .long sys_sendmmsg .long sys_userfaultfd .long sys_membarrier + .long sys_mlock2 /* 375 */ diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c index b958916e5eac..8f37fdd80be9 100644 --- a/arch/m68k/mm/motorola.c +++ b/arch/m68k/mm/motorola.c @@ -250,7 +250,7 @@ void __init paging_init(void) high_memory = phys_to_virt(max_addr); min_low_pfn = availmem >> PAGE_SHIFT; - max_low_pfn = max_addr >> PAGE_SHIFT; + max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT; for (i = 0; i < m68k_num_memory; i++) { addr = m68k_memory[i].addr; diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c index a8b942bf7163..2a5f43a68ae3 100644 --- a/arch/m68k/sun3/config.c +++ b/arch/m68k/sun3/config.c @@ -118,13 +118,13 @@ static void __init sun3_bootmem_alloc(unsigned long memory_start, memory_end = memory_end & PAGE_MASK; start_page = __pa(memory_start) >> PAGE_SHIFT; - num_pages = __pa(memory_end) >> PAGE_SHIFT; + max_pfn = num_pages = __pa(memory_end) >> PAGE_SHIFT; high_memory = (void *)memory_end; availmem = memory_start; m68k_setup_node(0); - availmem += init_bootmem_node(NODE_DATA(0), start_page, 0, num_pages); + availmem += init_bootmem(start_page, num_pages); availmem = (availmem + (PAGE_SIZE-1)) & PAGE_MASK; free_bootmem(__pa(availmem), memory_end - (availmem)); diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index d5fa3eaf39a1..41b1b090f56f 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c @@ -1581,7 +1581,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, base = (inst >> 21) & 0x1f; op_inst = (inst >> 16) & 0x1f; - offset = inst & 0xffff; + offset = (int16_t)inst; cache = (inst >> 16) & 0x3; op = (inst >> 18) & 0x7; diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S index 7bab3a4e8f7d..7e2210846b8b 100644 --- a/arch/mips/kvm/locore.S +++ b/arch/mips/kvm/locore.S @@ -157,9 +157,11 @@ FEXPORT(__kvm_mips_vcpu_run) FEXPORT(__kvm_mips_load_asid) /* Set the ASID for the Guest Kernel */ - INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ - /* addresses shift to 0x80000000 */ - bltz t0, 1f /* If kernel */ + PTR_L t0, VCPU_COP0(k1) + LONG_L t0, COP0_STATUS(t0) + andi t0, KSU_USER | ST0_ERL | ST0_EXL + xori t0, KSU_USER + bnez t0, 1f /* If kernel */ INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ 1: @@ -474,9 +476,11 @@ __kvm_mips_return_to_guest: mtc0 t0, CP0_EPC /* Set the ASID for the Guest Kernel */ - INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ - /* addresses shift to 0x80000000 */ - bltz t0, 1f /* If kernel */ + PTR_L t0, VCPU_COP0(k1) + LONG_L t0, COP0_STATUS(t0) + andi t0, KSU_USER | ST0_ERL | ST0_EXL + xori t0, KSU_USER + bnez t0, 1f /* If kernel */ INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else 
user */ 1: diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 49ff3bfc007e..b9b803facdbf 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -279,7 +279,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) if (!gebase) { err = -ENOMEM; - goto out_free_cpu; + goto out_uninit_cpu; } kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n", ALIGN(size, PAGE_SIZE), gebase); @@ -343,6 +343,9 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) out_free_gebase: kfree(gebase); +out_uninit_cpu: + kvm_vcpu_uninit(vcpu); + out_free_cpu: kfree(vcpu); diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c index 8a978022630b..dbbeccc3d714 100644 --- a/arch/mips/pci/pci-rt2880.c +++ b/arch/mips/pci/pci-rt2880.c @@ -11,6 +11,7 @@ * by the Free Software Foundation. */ +#include <linux/delay.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/io.h> @@ -232,8 +233,7 @@ static int rt288x_pci_probe(struct platform_device *pdev) ioport_resource.end = RT2880_PCI_IO_BASE + RT2880_PCI_IO_SIZE - 1; rt2880_pci_reg_write(0, RT2880_PCI_REG_PCICFG_ADDR); - for (i = 0; i < 0xfffff; i++) - ; + udelay(1); rt2880_pci_reg_write(0x79, RT2880_PCI_REG_ARBCTL); rt2880_pci_reg_write(0x07FF0001, RT2880_PCI_REG_BAR0SETUP_ADDR); diff --git a/arch/mips/pmcs-msp71xx/msp_setup.c b/arch/mips/pmcs-msp71xx/msp_setup.c index 4f925e06c414..78b2ef49dbc7 100644 --- a/arch/mips/pmcs-msp71xx/msp_setup.c +++ b/arch/mips/pmcs-msp71xx/msp_setup.c @@ -10,6 +10,8 @@ * option) any later version. */ +#include <linux/delay.h> + #include <asm/bootinfo.h> #include <asm/cacheflush.h> #include <asm/idle.h> @@ -77,7 +79,7 @@ void msp7120_reset(void) */ /* Wait a bit for the DDRC to settle */ - for (i = 0; i < 100000000; i++); + mdelay(125); #if defined(CONFIG_PMC_MSP7120_GW) /* diff --git a/arch/mips/sni/reset.c b/arch/mips/sni/reset.c index 244f9427625b..db8f88b6a3af 100644 --- a/arch/mips/sni/reset.c +++ b/arch/mips/sni/reset.c @@ -3,6 +3,8 @@ * * Reset a SNI machine. 
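Several hunks above replace counted busy-loops with udelay()/mdelay(): a bare counting loop has no defined duration and, unless the counter is volatile, may be deleted by the compiler outright. A userspace analogue of the change, with nanosleep() standing in for the kernel's calibrated busy-wait:

#include <time.h>

/* Stand-in for udelay(): sleep for a fixed number of microseconds. */
static void delay_us(long us)
{
	struct timespec ts = { .tv_sec = us / 1000000,
			       .tv_nsec = (us % 1000000) * 1000 };
	nanosleep(&ts, NULL);
}

int main(void)
{
	volatile int i;		/* without volatile, the loop below is dead code */

	for (i = 0; i < 100000; i++)
		;		/* old pattern: duration is CPU-dependent */

	delay_us(50);		/* replacement: an explicit time-based delay */
	return 0;
}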
*/ +#include <linux/delay.h> + #include <asm/io.h> #include <asm/reboot.h> #include <asm/sni.h> @@ -32,9 +34,9 @@ void sni_machine_restart(char *command) for (;;) { for (i = 0; i < 100; i++) { kb_wait(); - for (j = 0; j < 100000 ; j++) - /* nothing */; + udelay(50); outb_p(0xfe, 0x64); /* pulse reset low */ + udelay(50); } } } diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig index 4434b54e1d87..78ae5552fdb8 100644 --- a/arch/mn10300/Kconfig +++ b/arch/mn10300/Kconfig @@ -1,6 +1,7 @@ config MN10300 def_bool y select HAVE_OPROFILE + select HAVE_UID16 select GENERIC_IRQ_SHOW select ARCH_WANT_IPC_PARSE_VERSION select HAVE_ARCH_TRACEHOOK @@ -37,9 +38,6 @@ config HIGHMEM config NUMA def_bool n -config UID16 - def_bool y - config RWSEM_GENERIC_SPINLOCK def_bool y diff --git a/arch/nios2/mm/cacheflush.c b/arch/nios2/mm/cacheflush.c index 223cdcc8203f..87bf88ed04c6 100644 --- a/arch/nios2/mm/cacheflush.c +++ b/arch/nios2/mm/cacheflush.c @@ -23,22 +23,6 @@ static void __flush_dcache(unsigned long start, unsigned long end) end += (cpuinfo.dcache_line_size - 1); end &= ~(cpuinfo.dcache_line_size - 1); - for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) { - __asm__ __volatile__ (" flushda 0(%0)\n" - : /* Outputs */ - : /* Inputs */ "r"(addr) - /* : No clobber */); - } -} - -static void __flush_dcache_all(unsigned long start, unsigned long end) -{ - unsigned long addr; - - start &= ~(cpuinfo.dcache_line_size - 1); - end += (cpuinfo.dcache_line_size - 1); - end &= ~(cpuinfo.dcache_line_size - 1); - if (end > start + cpuinfo.dcache_size) end = start + cpuinfo.dcache_size; @@ -112,7 +96,7 @@ static void flush_aliases(struct address_space *mapping, struct page *page) void flush_cache_all(void) { - __flush_dcache_all(0, cpuinfo.dcache_size); + __flush_dcache(0, cpuinfo.dcache_size); __flush_icache(0, cpuinfo.icache_size); } @@ -182,7 +166,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page) */ unsigned long start = (unsigned long)page_address(page); - __flush_dcache_all(start, start + PAGE_SIZE); + __flush_dcache(start, start + PAGE_SIZE); } void flush_dcache_page(struct page *page) @@ -268,7 +252,7 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page, { flush_cache_page(vma, user_vaddr, page_to_pfn(page)); memcpy(dst, src, len); - __flush_dcache_all((unsigned long)src, (unsigned long)src + len); + __flush_dcache((unsigned long)src, (unsigned long)src + len); if (vma->vm_flags & VM_EXEC) __flush_icache((unsigned long)src, (unsigned long)src + len); } @@ -279,7 +263,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page, { flush_cache_page(vma, user_vaddr, page_to_pfn(page)); memcpy(dst, src, len); - __flush_dcache_all((unsigned long)dst, (unsigned long)dst + len); + __flush_dcache((unsigned long)dst, (unsigned long)dst + len); if (vma->vm_flags & VM_EXEC) __flush_icache((unsigned long)dst, (unsigned long)dst + len); } diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index a908ada8e0a5..2220f7a60def 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -108,6 +108,7 @@ #define MSR_TS_T __MASK(MSR_TS_T_LG) /* Transaction Transactional */ #define MSR_TS_MASK (MSR_TS_T | MSR_TS_S) /* Transaction State bits */ #define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? 
*/ +#define MSR_TM_RESV(x) (((x) & MSR_TS_MASK) == MSR_TS_MASK) /* Reserved */ #define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T) #define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S) diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 75b6676c1a0b..646bf4d222c1 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -551,6 +551,24 @@ static void tm_reclaim_thread(struct thread_struct *thr, msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1; } + /* + * Use the current MSR TM suspended bit to track if we have + * checkpointed state outstanding. + * On signal delivery, we'd normally reclaim the checkpointed + * state to obtain stack pointer (see:get_tm_stackpointer()). + * This will then directly return to userspace without going + * through __switch_to(). However, if the stack frame is bad, + * we need to exit this thread which calls __switch_to() which + * will again attempt to reclaim the already saved tm state. + * Hence we need to check that we've not already reclaimed + * this state. + * We do this using the current MSR, rather tracking it in + * some specific thread_struct bit, as it has the additional + * benifit of checking for a potential TM bad thing exception. + */ + if (!MSR_TM_SUSPENDED(mfmsr())) + return; + tm_reclaim(thr, thr->regs->msr, cause); /* Having done the reclaim, we now have the checkpointed diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 0dbee465af7a..ef7c24e84a62 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -875,6 +875,15 @@ static long restore_tm_user_regs(struct pt_regs *regs, return 1; #endif /* CONFIG_SPE */ + /* Get the top half of the MSR from the user context */ + if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR])) + return 1; + msr_hi <<= 32; + /* If TM bits are set to the reserved value, it's an invalid context */ + if (MSR_TM_RESV(msr_hi)) + return 1; + /* Pull in the MSR TM bits from the user context */ + regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK); /* Now, recheckpoint. This loads up all of the checkpointed (older) * registers, including FP and V[S]Rs. After recheckpointing, the * transactional versions should be loaded. @@ -884,11 +893,6 @@ static long restore_tm_user_regs(struct pt_regs *regs, current->thread.tm_texasr |= TEXASR_FS; /* This loads the checkpointed FP/VEC state, if used */ tm_recheckpoint(&current->thread, msr); - /* Get the top half of the MSR */ - if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR])) - return 1; - /* Pull in MSR TM from user context */ - regs->msr = (regs->msr & ~MSR_TS_MASK) | ((msr_hi<<32) & MSR_TS_MASK); /* This loads the speculative FP/VEC state, if used */ if (msr & MSR_FP) { diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 20756dfb9f34..c676ecec0869 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -438,6 +438,10 @@ static long restore_tm_sigcontexts(struct pt_regs *regs, /* get MSR separately, transfer the LE bit if doing signal return */ err |= __get_user(msr, &sc->gp_regs[PT_MSR]); + /* Don't allow reserved mode. 
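Both signal-restore paths now reject the reserved transaction-state encoding before it can reach the live MSR. A compact standalone model of the MSR_TM_RESV() test; the bit positions below are illustrative, not the real powerpc MSR layout:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative positions for the two transaction-state bits. */
#define MSR_TS_S	(1ULL << 33)	/* suspended */
#define MSR_TS_T	(1ULL << 34)	/* transactional */
#define MSR_TS_MASK	(MSR_TS_T | MSR_TS_S)

/* Both TS bits set at once is the reserved (invalid) encoding. */
static bool msr_tm_resv(uint64_t msr)
{
	return (msr & MSR_TS_MASK) == MSR_TS_MASK;
}

int main(void)
{
	assert(!msr_tm_resv(MSR_TS_S));			/* suspended: valid */
	assert(!msr_tm_resv(MSR_TS_T));			/* transactional: valid */
	assert(msr_tm_resv(MSR_TS_S | MSR_TS_T));	/* reserved: rejected */
	return 0;
}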
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 20756dfb9f34..c676ecec0869 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -438,6 +438,10 @@ static long restore_tm_sigcontexts(struct pt_regs *regs, /* get MSR separately, transfer the LE bit if doing signal return */ err |= __get_user(msr, &sc->gp_regs[PT_MSR]); + /* Don't allow reserved mode. */ + if (MSR_TM_RESV(msr)) + return -EINVAL; + /* pull in MSR TM from user context */ regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK); diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 373e32346d68..6a75352f453c 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -1030,8 +1030,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) src_id, 0); /* sending vcpu invalid */ - if (src_id >= KVM_MAX_VCPUS || - kvm_get_vcpu(vcpu->kvm, src_id) == NULL) + if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL) return -EINVAL; if (sclp.has_sigpif) @@ -1110,6 +1109,10 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu, trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, irq->u.emerg.code, 0); + /* sending vcpu invalid */ + if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL) + return -EINVAL; + set_bit(irq->u.emerg.code, li->sigp_emerg_pending); set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); atomic_or(CPUSTAT_EXT_INT, li->cpuflags); diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 8fe2f1c722dc..846589281b04 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -342,12 +342,16 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) r = 0; break; case KVM_CAP_S390_VECTOR_REGISTERS: - if (MACHINE_HAS_VX) { + mutex_lock(&kvm->lock); + if (atomic_read(&kvm->online_vcpus)) { + r = -EBUSY; + } else if (MACHINE_HAS_VX) { set_kvm_facility(kvm->arch.model.fac->mask, 129); set_kvm_facility(kvm->arch.model.fac->list, 129); r = 0; } else r = -EINVAL; + mutex_unlock(&kvm->lock); VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s", r ? "(not available)" : "(success)"); break; diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 77191b85ea7a..d76b51cb4b62 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -660,7 +660,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) kvm_s390_get_regs_rre(vcpu, &reg1, &reg2); - if (!MACHINE_HAS_PFMF) + if (!test_kvm_facility(vcpu->kvm, 8)) return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index da690b69f9fe..77c22d685c7a 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -291,12 +291,8 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code, u16 cpu_addr, u32 parameter, u64 *status_reg) { int rc; - struct kvm_vcpu *dst_vcpu; + struct kvm_vcpu *dst_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr); - if (cpu_addr >= KVM_MAX_VCPUS) - return SIGP_CC_NOT_OPERATIONAL; - - dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); if (!dst_vcpu) return SIGP_CC_NOT_OPERATIONAL; @@ -478,7 +474,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu) trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr); if (order_code == SIGP_EXTERNAL_CALL) { - dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); + dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr); BUG_ON(dest_vcpu == NULL); kvm_s390_vcpu_wakeup(dest_vcpu); diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h index 0033e96c3f09..9011a88353de 100644 --- a/arch/x86/boot/boot.h +++ b/arch/x86/boot/boot.h @@ -23,7 +23,6 @@ #include <stdarg.h> #include <linux/types.h> #include <linux/edd.h> -#include <asm/boot.h> #include <asm/setup.h> #include "bitops.h" #include "ctype.h" diff --git a/arch/x86/boot/video-mode.c b/arch/x86/boot/video-mode.c index aa8a96b052e3..95c7a818c0ed 100644 --- a/arch/x86/boot/video-mode.c
+++ b/arch/x86/boot/video-mode.c @@ -19,6 +19,8 @@ #include "video.h" #include "vesa.h" +#include <uapi/asm/boot.h> + /* * Common variables */ diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c index 05111bb8d018..77780e386e9b 100644 --- a/arch/x86/boot/video.c +++ b/arch/x86/boot/video.c @@ -13,6 +13,8 @@ * Select video mode */ +#include <uapi/asm/boot.h> + #include "boot.h" #include "video.h" #include "vesa.h" diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 53616ca03244..a55697d19824 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -509,6 +509,17 @@ END(irq_entries_start) * tracking that we're in kernel mode. */ SWAPGS + + /* + * We need to tell lockdep that IRQs are off. We can't do this until + * we fix gsbase, and we should do it before enter_from_user_mode + * (which can take locks). Since TRACE_IRQS_OFF is idempotent, + * the simplest way to handle it is to just call it twice if + * we enter from user mode. There's no reason to optimize this since + * TRACE_IRQS_OFF is a no-op if lockdep is off. + */ + TRACE_IRQS_OFF + #ifdef CONFIG_CONTEXT_TRACKING call enter_from_user_mode #endif @@ -1049,12 +1060,18 @@ ENTRY(error_entry) SWAPGS .Lerror_entry_from_usermode_after_swapgs: + /* + * We need to tell lockdep that IRQs are off. We can't do this until + * we fix gsbase, and we should do it before enter_from_user_mode + * (which can take locks). + */ + TRACE_IRQS_OFF #ifdef CONFIG_CONTEXT_TRACKING call enter_from_user_mode #endif + ret .Lerror_entry_done: - TRACE_IRQS_OFF ret diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index c5b7fb2774d0..cc071c6f7d4d 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h @@ -9,19 +9,21 @@ #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE-1)) +#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT) +#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1)) + +#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) +#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) + #define __PHYSICAL_MASK ((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1)) #define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1) -/* Cast PAGE_MASK to a signed type so that it is sign-extended if +/* Cast *PAGE_MASK to a signed type so that it is sign-extended if virtual addresses are 32-bits but physical addresses are larger (ie, 32-bit PAE). */
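/*
 * Illustration (an aside, not part of the patch): assume 32-bit PAE, i.e.
 * unsigned long is 32 bits, phys_addr_t is 64 bits, and pages are 4 KiB:
 *
 *   PAGE_MASK                    == 0xfffff000
 *   (phys_addr_t)PAGE_MASK       == 0x00000000fffff000  zero-extended: the
 *                                   high physical bits would be masked off
 *   (phys_addr_t)(signed long)PAGE_MASK
 *                                == 0xfffffffffffff000  sign-extended: the
 *                                   high physical bits survive
 *
 * Hence the signed casts in PHYSICAL_PAGE_MASK below and in the new
 * PHYSICAL_PMD_PAGE_MASK/PHYSICAL_PUD_PAGE_MASK variants.
 */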
#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK) - -#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT) -#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1)) - -#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) -#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) +#define PHYSICAL_PMD_PAGE_MASK (((signed long)PMD_PAGE_MASK) & __PHYSICAL_MASK) +#define PHYSICAL_PUD_PAGE_MASK (((signed long)PUD_PAGE_MASK) & __PHYSICAL_MASK) #define HPAGE_SHIFT PMD_SHIFT #define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT) diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index dd5b0aa9dd2f..a471cadb9630 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -279,17 +279,14 @@ static inline pmdval_t native_pmd_val(pmd_t pmd) static inline pudval_t pud_pfn_mask(pud_t pud) { if (native_pud_val(pud) & _PAGE_PSE) - return PUD_PAGE_MASK & PHYSICAL_PAGE_MASK; + return PHYSICAL_PUD_PAGE_MASK; else return PTE_PFN_MASK; } static inline pudval_t pud_flags_mask(pud_t pud) { - if (native_pud_val(pud) & _PAGE_PSE) - return ~(PUD_PAGE_MASK & (pudval_t)PHYSICAL_PAGE_MASK); - else - return ~PTE_PFN_MASK; + return ~pud_pfn_mask(pud); } static inline pudval_t pud_flags(pud_t pud) @@ -300,17 +297,14 @@ static inline pudval_t pud_flags(pud_t pud) static inline pmdval_t pmd_pfn_mask(pmd_t pmd) { if (native_pmd_val(pmd) & _PAGE_PSE) - return PMD_PAGE_MASK & PHYSICAL_PAGE_MASK; + return PHYSICAL_PMD_PAGE_MASK; else return PTE_PFN_MASK; } static inline pmdval_t pmd_flags_mask(pmd_t pmd) { - if (native_pmd_val(pmd) & _PAGE_PSE) - return ~(PMD_PAGE_MASK & (pmdval_t)PHYSICAL_PAGE_MASK); - else - return ~PTE_PFN_MASK; + return ~pmd_pfn_mask(pmd); } static inline pmdval_t pmd_flags(pmd_t pmd) diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index 48d34d28f5a6..cd0fc0cc78bc 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h @@ -1,7 +1,6 @@ #ifndef _ASM_X86_PLATFORM_H #define _ASM_X86_PLATFORM_H -#include <asm/pgtable_types.h> #include <asm/bootparam.h> struct mpc_bus; diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 7fc27f1cca58..b3e94ef461fd 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -698,3 +698,4 @@ int __init microcode_init(void) return error; } +late_initcall(microcode_init); diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index db9a675e751b..bca14c899137 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -547,6 +547,7 @@ static const struct pci_device_id intel_stolen_ids[] __initconst = { INTEL_CHV_IDS(&chv_stolen_funcs), INTEL_SKL_IDS(&gen9_stolen_funcs), INTEL_BXT_IDS(&gen9_stolen_funcs), + INTEL_KBL_IDS(&gen9_stolen_funcs), }; static void __init intel_graphics_stolen(int num, int slot, int func) diff --git a/arch/x86/kernel/pmem.c b/arch/x86/kernel/pmem.c index 4f00b63d7ff3..14415aff1813 100644 --- a/arch/x86/kernel/pmem.c +++ b/arch/x86/kernel/pmem.c @@ -4,10 +4,22 @@ */ #include <linux/platform_device.h> #include <linux/module.h> +#include <linux/ioport.h> + +static int found(u64 start, u64 end, void *data) +{ + return 1; +} static __init int register_e820_pmem(void) { + char *pmem = "Persistent Memory (legacy)"; struct platform_device *pdev; + int rc; + + rc = walk_iomem_res(pmem, IORESOURCE_MEM, 0, -1, NULL, found); + if (rc <= 0) + return 0; /* * See drivers/nvdimm/e820.c for the implementation, this is diff --git
a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 29db25f9a745..d2bbe343fda7 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1250,8 +1250,6 @@ void __init setup_arch(char **cmdline_p) if (efi_enabled(EFI_BOOT)) efi_apply_memmap_quirks(); #endif - - microcode_init(); } #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index b7ffb7c00075..cb6282c3638f 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -690,12 +690,15 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs) signal_setup_done(failed, ksig, stepping); } -#ifdef CONFIG_X86_32 -#define NR_restart_syscall __NR_restart_syscall -#else /* !CONFIG_X86_32 */ -#define NR_restart_syscall \ - test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall -#endif /* CONFIG_X86_32 */ +static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs) +{ +#if defined(CONFIG_X86_32) || !defined(CONFIG_X86_64) + return __NR_restart_syscall; +#else /* !CONFIG_X86_32 && CONFIG_X86_64 */ + return test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : + __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT); +#endif /* CONFIG_X86_32 || !CONFIG_X86_64 */ +} /* * Note that 'init' is a special process: it doesn't get signals it doesn't @@ -724,7 +727,7 @@ void do_signal(struct pt_regs *regs) break; case -ERESTART_RESTARTBLOCK: - regs->ax = NR_restart_syscall; + regs->ax = get_nr_restart_syscall(regs); regs->ip -= 2; break; } diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 892ee2e5ecbc..fbabe4fcc7fb 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -509,7 +509,7 @@ void __inquire_remote_apic(int apicid) */ #define UDELAY_10MS_DEFAULT 10000 -static unsigned int init_udelay = INT_MAX; +static unsigned int init_udelay = UINT_MAX; static int __init cpu_init_udelay(char *str) { @@ -522,14 +522,15 @@ early_param("cpu_init_udelay", cpu_init_udelay); static void __init smp_quirk_init_udelay(void) { /* if cmdline changed it from default, leave it alone */ - if (init_udelay != INT_MAX) + if (init_udelay != UINT_MAX) return; /* if modern processor, use no delay */ if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) || - ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) + ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) { init_udelay = 0; - + return; + } /* else, use legacy delay */ init_udelay = UDELAY_10MS_DEFAULT; } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 87acc5221740..af823a388c19 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -7394,11 +7394,6 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) switch (type) { case VMX_VPID_EXTENT_ALL_CONTEXT: - if (get_vmcs12(vcpu)->virtual_processor_id == 0) { - nested_vmx_failValid(vcpu, - VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); - return 1; - } __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02); nested_vmx_succeed(vcpu); break; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 00462bd63129..eed32283d22c 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2763,6 +2763,26 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, return 0; } +static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu) +{ + return (!lapic_in_kernel(vcpu) || + kvm_apic_accept_pic_intr(vcpu)); +} + +/* + * if userspace requested an interrupt window, check that the + * interrupt window is open. 
+ * + * No need to exit to userspace if we already have an interrupt queued. + */ +static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu) +{ + return kvm_arch_interrupt_allowed(vcpu) && + !kvm_cpu_has_interrupt(vcpu) && + !kvm_event_needs_reinjection(vcpu) && + kvm_cpu_accept_dm_intr(vcpu); +} + static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) { @@ -2786,6 +2806,7 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, return -EEXIST; vcpu->arch.pending_external_vector = irq->irq; + kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } @@ -5910,23 +5931,10 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) return emulator_write_emulated(ctxt, rip, instruction, 3, NULL); } -/* - * Check if userspace requested an interrupt window, and that the - * interrupt window is open. - * - * No need to exit to userspace if we already have an interrupt queued. - */ static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) { - if (!vcpu->run->request_interrupt_window || pic_in_kernel(vcpu->kvm)) - return false; - - if (kvm_cpu_has_interrupt(vcpu)) - return false; - - return (irqchip_split(vcpu->kvm) - ? kvm_apic_accept_pic_intr(vcpu) - : kvm_arch_interrupt_allowed(vcpu)); + return vcpu->run->request_interrupt_window && + likely(!pic_in_kernel(vcpu->kvm)); } static void post_kvm_run_save(struct kvm_vcpu *vcpu) @@ -5937,17 +5945,9 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0; kvm_run->cr8 = kvm_get_cr8(vcpu); kvm_run->apic_base = kvm_get_apic_base(vcpu); - if (!irqchip_in_kernel(vcpu->kvm)) - kvm_run->ready_for_interrupt_injection = - kvm_arch_interrupt_allowed(vcpu) && - !kvm_cpu_has_interrupt(vcpu) && - !kvm_event_needs_reinjection(vcpu); - else if (!pic_in_kernel(vcpu->kvm)) - kvm_run->ready_for_interrupt_injection = - kvm_apic_accept_pic_intr(vcpu) && - !kvm_cpu_has_interrupt(vcpu); - else - kvm_run->ready_for_interrupt_injection = 1; + kvm_run->ready_for_interrupt_injection = + pic_in_kernel(vcpu->kvm) || + kvm_vcpu_ready_for_interrupt_injection(vcpu); } static void update_cr8_intercept(struct kvm_vcpu *vcpu) @@ -6360,8 +6360,10 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, static int vcpu_enter_guest(struct kvm_vcpu *vcpu) { int r; - bool req_int_win = !lapic_in_kernel(vcpu) && - vcpu->run->request_interrupt_window; + bool req_int_win = + dm_request_for_irq_injection(vcpu) && + kvm_cpu_accept_dm_intr(vcpu); + bool req_immediate_exit = false; if (vcpu->requests) { @@ -6663,7 +6665,8 @@ static int vcpu_run(struct kvm_vcpu *vcpu) if (kvm_cpu_has_pending_timer(vcpu)) kvm_inject_pending_timer_irqs(vcpu); - if (dm_request_for_irq_injection(vcpu)) { + if (dm_request_for_irq_injection(vcpu) && + kvm_vcpu_ready_for_interrupt_injection(vcpu)) { r = 0; vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; ++vcpu->stat.request_irq_exits; diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index 1202d5ca2fb5..b2fd67da1701 100644 --- a/arch/x86/mm/mpx.c +++ b/arch/x86/mm/mpx.c @@ -101,19 +101,19 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs, switch (type) { case REG_TYPE_RM: regno = X86_MODRM_RM(insn->modrm.value); - if (X86_REX_B(insn->rex_prefix.value) == 1) + if (X86_REX_B(insn->rex_prefix.value)) regno += 8; break; case REG_TYPE_INDEX: regno = X86_SIB_INDEX(insn->sib.value); - if (X86_REX_X(insn->rex_prefix.value) == 1) + if (X86_REX_X(insn->rex_prefix.value)) regno += 8; break; case REG_TYPE_BASE: regno = 
X86_SIB_BASE(insn->sib.value); - if (X86_REX_B(insn->rex_prefix.value) == 1) + if (X86_REX_B(insn->rex_prefix.value)) regno += 8; break; diff --git a/arch/x86/pci/bus_numa.c b/arch/x86/pci/bus_numa.c index 7bcf06a7cd12..6eb3c8af96e2 100644 --- a/arch/x86/pci/bus_numa.c +++ b/arch/x86/pci/bus_numa.c @@ -50,18 +50,9 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources) if (!found) pci_add_resource(resources, &info->busn); - list_for_each_entry(root_res, &info->resources, list) { - struct resource *res; - struct resource *root; + list_for_each_entry(root_res, &info->resources, list) + pci_add_resource(resources, &root_res->res); - res = &root_res->res; - pci_add_resource(resources, res); - if (res->flags & IORESOURCE_IO) - root = &ioport_resource; - else - root = &iomem_resource; - insert_resource(root, res); - } return; default_resources: diff --git a/block/blk-core.c b/block/blk-core.c index 5131993b23a1..a0af4043dda2 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -2114,7 +2114,8 @@ blk_qc_t submit_bio(int rw, struct bio *bio) EXPORT_SYMBOL(submit_bio); /** - * blk_rq_check_limits - Helper function to check a request for the queue limit + * blk_cloned_rq_check_limits - Helper function to check a cloned request + * for the new queue limits * @q: the queue * @rq: the request being checked * * @@ -2125,20 +2126,13 @@ EXPORT_SYMBOL(submit_bio); * after it is inserted to @q, it should be checked against @q before * the insertion using this generic function. * - * This function should also be useful for request stacking drivers * in some cases below, so export this function. * Request stacking drivers like request-based dm may change the queue - * limits while requests are in the queue (e.g. dm's table swapping). - * Such request stacking drivers should check those requests against - * the new queue limits again when they dispatch those requests, - * although such checkings are also done against the old queue limits - * when submitting requests. + * limits when retrying requests on other queues. Those requests need + * to be checked against the new queue limits again during dispatch.
*/ -int blk_rq_check_limits(struct request_queue *q, struct request *rq) +static int blk_cloned_rq_check_limits(struct request_queue *q, + struct request *rq) { - if (!rq_mergeable(rq)) - return 0; - if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) { printk(KERN_ERR "%s: over max size limit.\n", __func__); return -EIO; @@ -2158,7 +2152,6 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq) return 0; } -EXPORT_SYMBOL_GPL(blk_rq_check_limits); /** * blk_insert_cloned_request - Helper for stacking drivers to submit a request @@ -2170,7 +2163,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq) unsigned long flags; int where = ELEVATOR_INSERT_BACK; - if (blk_rq_check_limits(q, rq)) + if (blk_cloned_rq_check_limits(q, rq)) return -EIO; if (rq->rq_disk && diff --git a/block/blk-merge.c b/block/blk-merge.c index de5716d8e525..e01405a3e8b3 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -76,6 +76,9 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, struct bio_vec bv, bvprv, *bvprvp = NULL; struct bvec_iter iter; unsigned seg_size = 0, nsegs = 0, sectors = 0; + unsigned front_seg_size = bio->bi_seg_front_size; + bool do_split = true; + struct bio *new = NULL; bio_for_each_segment(bv, bio, iter) { if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q)) @@ -98,8 +101,11 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, seg_size += bv.bv_len; bvprv = bv; - bvprvp = &bv; + bvprvp = &bvprv; sectors += bv.bv_len >> 9; + + if (nsegs == 1 && seg_size > front_seg_size) + front_seg_size = seg_size; continue; } new_segment: @@ -108,16 +114,29 @@ new_segment: nsegs++; bvprv = bv; - bvprvp = &bv; + bvprvp = &bvprv; seg_size = bv.bv_len; sectors += bv.bv_len >> 9; + + if (nsegs == 1 && seg_size > front_seg_size) + front_seg_size = seg_size; } - *segs = nsegs; - return NULL; + do_split = false; split: *segs = nsegs; + + if (do_split) { + new = bio_split(bio, sectors, GFP_NOIO, bs); + if (new) + bio = new; + } + + bio->bi_seg_front_size = front_seg_size; + if (seg_size > bio->bi_seg_back_size) + bio->bi_seg_back_size = seg_size; + + return do_split ? new : NULL; } void blk_queue_split(struct request_queue *q, struct bio **bio, @@ -412,6 +431,12 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq, if (sg) sg_mark_end(sg); + /* + * Something must have been wrong if the computed number of + * segments is bigger than the number of req's physical segments + */ + WARN_ON(nsegs > rq->nr_phys_segments); + return nsegs; } EXPORT_SYMBOL(blk_rq_map_sg); diff --git a/block/blk-mq.c b/block/blk-mq.c index 3ae09de62f19..6d6f8feb48c0 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1291,15 +1291,16 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) blk_mq_bio_to_request(rq, bio); /* - * we do limited pluging. If bio can be merged, do merge. + * We do limited plugging. If the bio can be merged, do that. * Otherwise the existing request in the plug list will be * issued. So the plug list will have one request at most */ if (plug) { /* * The plug list might get flushed before this.
If that - * happens, same_queue_rq is invalid and plug list is empty - **/ + * happens, same_queue_rq is invalid and plug list is + * empty + */ if (same_queue_rq && !list_empty(&plug->mq_list)) { old_rq = same_queue_rq; list_del_init(&old_rq->queuelist); @@ -1380,12 +1381,15 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio) blk_mq_bio_to_request(rq, bio); if (!request_count) trace_block_plug(q); - else if (request_count >= BLK_MAX_REQUEST_COUNT) { + + blk_mq_put_ctx(data.ctx); + + if (request_count >= BLK_MAX_REQUEST_COUNT) { blk_flush_plug_list(plug, false); trace_block_plug(q); } + list_add_tail(&rq->queuelist, &plug->mq_list); - blk_mq_put_ctx(data.ctx); return cookie; } diff --git a/block/blk-settings.c b/block/blk-settings.c index 7d8f129a1516..dd4973583978 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -91,7 +91,8 @@ void blk_set_default_limits(struct queue_limits *lim) lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; lim->virt_boundary_mask = 0; lim->max_segment_size = BLK_MAX_SEGMENT_SIZE; - lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS; + lim->max_sectors = lim->max_dev_sectors = lim->max_hw_sectors = + BLK_SAFE_MAX_SECTORS; lim->chunk_sectors = 0; lim->max_write_same_sectors = 0; lim->max_discard_sectors = 0; @@ -127,6 +128,7 @@ void blk_set_stacking_limits(struct queue_limits *lim) lim->max_hw_sectors = UINT_MAX; lim->max_segment_size = UINT_MAX; lim->max_sectors = UINT_MAX; + lim->max_dev_sectors = UINT_MAX; lim->max_write_same_sectors = UINT_MAX; } EXPORT_SYMBOL(blk_set_stacking_limits); @@ -214,8 +216,8 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr) EXPORT_SYMBOL(blk_queue_bounce_limit); /** - * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request - * @limits: the queue limits + * blk_queue_max_hw_sectors - set max sectors for a request for this queue + * @q: the request queue for the device * @max_hw_sectors: max hardware sectors in the usual 512b unit * * Description: @@ -224,13 +226,19 @@ EXPORT_SYMBOL(blk_queue_bounce_limit); * the device driver based upon the capabilities of the I/O * controller. * + * max_dev_sectors is a hard limit imposed by the storage device for + * READ/WRITE requests. It is set by the disk driver. + * * max_sectors is a soft limit imposed by the block layer for * filesystem type requests. This value can be overridden on a * per-device basis in /sys/block/<device>/queue/max_sectors_kb. * The soft limit can not exceed max_hw_sectors. **/ -void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors) +void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors) { + struct queue_limits *limits = &q->limits; + unsigned int max_sectors; + if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) { max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9); printk(KERN_INFO "%s: set to minimum %d\n", @@ -238,22 +246,9 @@ void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_ } limits->max_hw_sectors = max_hw_sectors; - limits->max_sectors = min_t(unsigned int, max_hw_sectors, - BLK_DEF_MAX_SECTORS); -} -EXPORT_SYMBOL(blk_limits_max_hw_sectors); - -/** - * blk_queue_max_hw_sectors - set max sectors for a request for this queue - * @q: the request queue for the device - * @max_hw_sectors: max hardware sectors in the usual 512b unit - * - * Description: - * See description for blk_limits_max_hw_sectors(). 
- **/ -void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors) -{ - blk_limits_max_hw_sectors(&q->limits, max_hw_sectors); + max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors); + max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS); + limits->max_sectors = max_sectors; } EXPORT_SYMBOL(blk_queue_max_hw_sectors); @@ -527,6 +522,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); + t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors); t->max_write_same_sectors = min(t->max_write_same_sectors, b->max_write_same_sectors); t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 565b8dac5782..e140cc487ce1 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -205,6 +205,9 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) if (ret < 0) return ret; + max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long) + q->limits.max_dev_sectors >> 1); + if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb) return -EINVAL; diff --git a/block/blk-timeout.c b/block/blk-timeout.c index 246dfb16c3d9..aa40aa93381b 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c @@ -158,11 +158,13 @@ void blk_abort_request(struct request *req) { if (blk_mark_rq_complete(req)) return; - blk_delete_timer(req); - if (req->q->mq_ops) + + if (req->q->mq_ops) { blk_mq_rq_timed_out(req, false); - else + } else { + blk_delete_timer(req); blk_rq_timed_out(req); + } } EXPORT_SYMBOL_GPL(blk_abort_request); diff --git a/block/noop-iosched.c b/block/noop-iosched.c index 3de89d4690f3..a163c487cf38 100644 --- a/block/noop-iosched.c +++ b/block/noop-iosched.c @@ -21,10 +21,10 @@ static void noop_merged_requests(struct request_queue *q, struct request *rq, static int noop_dispatch(struct request_queue *q, int force) { struct noop_data *nd = q->elevator->elevator_data; + struct request *rq; - if (!list_empty(&nd->queue)) { - struct request *rq; - rq = list_entry(nd->queue.next, struct request, queuelist); + rq = list_first_entry_or_null(&nd->queue, struct request, queuelist); + if (rq) { list_del_init(&rq->queuelist); elv_dispatch_sort(q, rq); return 1; @@ -46,7 +46,7 @@ noop_former_request(struct request_queue *q, struct request *rq) if (rq->queuelist.prev == &nd->queue) return NULL; - return list_entry(rq->queuelist.prev, struct request, queuelist); + return list_prev_entry(rq, queuelist); } static struct request * @@ -56,7 +56,7 @@ noop_latter_request(struct request_queue *q, struct request *rq) if (rq->queuelist.next == &nd->queue) return NULL; - return list_entry(rq->queuelist.next, struct request, queuelist); + return list_next_entry(rq, queuelist); } static int noop_init_queue(struct request_queue *q, struct elevator_type *e) diff --git a/block/partition-generic.c b/block/partition-generic.c index 3b030157ec85..746935a5973c 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c @@ -397,7 +397,7 @@ static int drop_partitions(struct gendisk *disk, struct block_device *bdev) struct hd_struct *part; int res; - if (bdev->bd_part_count) + if (bdev->bd_part_count || bdev->bd_super) return -EBUSY; res = invalidate_partition(disk, 0); if (res) diff --git a/block/partitions/mac.c b/block/partitions/mac.c index c2c48ec64b27..621317ac4d59 100644 --- a/block/partitions/mac.c 
+++ b/block/partitions/mac.c @@ -32,7 +32,7 @@ int mac_partition(struct parsed_partitions *state) Sector sect; unsigned char *data; int slot, blocks_in_map; - unsigned secsize; + unsigned secsize, datasize, partoffset; #ifdef CONFIG_PPC_PMAC int found_root = 0; int found_root_goodness = 0; @@ -50,10 +50,14 @@ int mac_partition(struct parsed_partitions *state) } secsize = be16_to_cpu(md->block_size); put_dev_sector(sect); - data = read_part_sector(state, secsize/512, &sect); + datasize = round_down(secsize, 512); + data = read_part_sector(state, datasize / 512, &sect); if (!data) return -1; - part = (struct mac_partition *) (data + secsize%512); + partoffset = secsize % 512; + if (partoffset + sizeof(*part) > datasize) + return -1; + part = (struct mac_partition *) (data + partoffset); if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) { put_dev_sector(sect); return 0; /* not a MacOS disk */ diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 0aa6fdfb448a..6d4d4569447e 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -125,7 +125,7 @@ static int aead_wait_for_data(struct sock *sk, unsigned flags) if (flags & MSG_DONTWAIT) return -EAGAIN; - set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); + sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); for (;;) { if (signal_pending(current)) @@ -139,7 +139,7 @@ static int aead_wait_for_data(struct sock *sk, unsigned flags) } finish_wait(sk_sleep(sk), &wait); - clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); + sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); return err; } diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index af31a0ee4057..ca9efe17db1a 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -212,7 +212,7 @@ static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags) if (flags & MSG_DONTWAIT) return -EAGAIN; - set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); for (;;) { if (signal_pending(current)) @@ -258,7 +258,7 @@ static int skcipher_wait_for_data(struct sock *sk, unsigned flags) return -EAGAIN; } - set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); + sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); for (;;) { if (signal_pending(current)) @@ -272,7 +272,7 @@ static int skcipher_wait_for_data(struct sock *sk, unsigned flags) } finish_wait(sk_sleep(sk), &wait); - clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); + sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); return err; } diff --git a/drivers/Makefile b/drivers/Makefile index 73d039156ea7..795d0ca714bf 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -63,6 +63,7 @@ obj-$(CONFIG_FB_I810) += video/fbdev/i810/ obj-$(CONFIG_FB_INTEL) += video/fbdev/intelfb/ obj-$(CONFIG_PARPORT) += parport/ +obj-$(CONFIG_NVM) += lightnvm/ obj-y += base/ block/ misc/ mfd/ nfc/ obj-$(CONFIG_LIBNVDIMM) += nvdimm/ obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/ @@ -70,7 +71,6 @@ obj-$(CONFIG_NUBUS) += nubus/ obj-y += macintosh/ obj-$(CONFIG_IDE) += ide/ obj-$(CONFIG_SCSI) += scsi/ -obj-$(CONFIG_NVM) += lightnvm/ obj-y += nvme/ obj-$(CONFIG_ATA) += ata/ obj-$(CONFIG_TARGET_CORE) += target/ diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 25dbb76c02cc..5eef4cb4f70e 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -58,10 +58,10 @@ config ACPI_CCA_REQUIRED bool config ACPI_DEBUGGER - bool "In-kernel debugger (EXPERIMENTAL)" + bool "AML debugger interface (EXPERIMENTAL)" select ACPI_DEBUG help - Enable in-kernel debugging facilities: statistics, internal + Enable in-kernel debugging of AML facilities:
statistics, internal object dump, single step control method execution. This is still under development, currently enabling this only results in the compilation of the ACPICA debugger files. diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c index f7dab53b352a..e7ed39bab97d 100644 --- a/drivers/acpi/nfit.c +++ b/drivers/acpi/nfit.c @@ -233,11 +233,12 @@ static bool add_spa(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_system_address *spa) { + size_t length = min_t(size_t, sizeof(*spa), spa->header.length); struct device *dev = acpi_desc->dev; struct nfit_spa *nfit_spa; list_for_each_entry(nfit_spa, &prev->spas, list) { - if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) { + if (memcmp(nfit_spa->spa, spa, length) == 0) { list_move_tail(&nfit_spa->list, &acpi_desc->spas); return true; } @@ -259,11 +260,12 @@ static bool add_memdev(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_memory_map *memdev) { + size_t length = min_t(size_t, sizeof(*memdev), memdev->header.length); struct device *dev = acpi_desc->dev; struct nfit_memdev *nfit_memdev; list_for_each_entry(nfit_memdev, &prev->memdevs, list) - if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) { + if (memcmp(nfit_memdev->memdev, memdev, length) == 0) { list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs); return true; } @@ -284,11 +286,12 @@ static bool add_dcr(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_control_region *dcr) { + size_t length = min_t(size_t, sizeof(*dcr), dcr->header.length); struct device *dev = acpi_desc->dev; struct nfit_dcr *nfit_dcr; list_for_each_entry(nfit_dcr, &prev->dcrs, list) - if (memcmp(nfit_dcr->dcr, dcr, sizeof(*dcr)) == 0) { + if (memcmp(nfit_dcr->dcr, dcr, length) == 0) { list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs); return true; } @@ -308,11 +311,12 @@ static bool add_bdw(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_data_region *bdw) { + size_t length = min_t(size_t, sizeof(*bdw), bdw->header.length); struct device *dev = acpi_desc->dev; struct nfit_bdw *nfit_bdw; list_for_each_entry(nfit_bdw, &prev->bdws, list) - if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) { + if (memcmp(nfit_bdw->bdw, bdw, length) == 0) { list_move_tail(&nfit_bdw->list, &acpi_desc->bdws); return true; } @@ -332,11 +336,12 @@ static bool add_idt(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_interleave *idt) { + size_t length = min_t(size_t, sizeof(*idt), idt->header.length); struct device *dev = acpi_desc->dev; struct nfit_idt *nfit_idt; list_for_each_entry(nfit_idt, &prev->idts, list) - if (memcmp(nfit_idt->idt, idt, sizeof(*idt)) == 0) { + if (memcmp(nfit_idt->idt, idt, length) == 0) { list_move_tail(&nfit_idt->list, &acpi_desc->idts); return true; } @@ -356,11 +361,12 @@ static bool add_flush(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_flush_address *flush) { + size_t length = min_t(size_t, sizeof(*flush), flush->header.length); struct device *dev = acpi_desc->dev; struct nfit_flush *nfit_flush; list_for_each_entry(nfit_flush, &prev->flushes, list) - if (memcmp(nfit_flush->flush, flush, sizeof(*flush)) == 0) { + if (memcmp(nfit_flush->flush, flush, length) == 0) { list_move_tail(&nfit_flush->list, &acpi_desc->flushes); return true; } @@ -655,7 +661,7 @@ static ssize_t revision_show(struct device *dev, struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); struct acpi_nfit_desc 
*acpi_desc = to_acpi_desc(nd_desc); - return sprintf(buf, "%d\n", acpi_desc->nfit->header.revision); + return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision); } static DEVICE_ATTR_RO(revision); @@ -1652,7 +1658,6 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz) data = (u8 *) acpi_desc->nfit; end = data + sz; - data += sizeof(struct acpi_table_nfit); while (!IS_ERR_OR_NULL(data)) data = add_table(acpi_desc, &prev, data, end); @@ -1748,13 +1753,29 @@ static int acpi_nfit_add(struct acpi_device *adev) return PTR_ERR(acpi_desc); } - acpi_desc->nfit = (struct acpi_table_nfit *) tbl; + /* + * Save the acpi header for later and then skip it, + * making nfit point to the first nfit table header. + */ + acpi_desc->acpi_header = *tbl; + acpi_desc->nfit = (void *) tbl + sizeof(struct acpi_table_nfit); + sz -= sizeof(struct acpi_table_nfit); /* Evaluate _FIT and override with that if present */ status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); if (ACPI_SUCCESS(status) && buf.length > 0) { - acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer; - sz = buf.length; + union acpi_object *obj; + /* + * Adjust for the acpi_object header of the _FIT + */ + obj = buf.pointer; + if (obj->type == ACPI_TYPE_BUFFER) { + acpi_desc->nfit = + (struct acpi_nfit_header *)obj->buffer.pointer; + sz = obj->buffer.length; + } else + dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n", + __func__, (int) obj->type); } rc = acpi_nfit_init(acpi_desc, sz); @@ -1777,7 +1798,8 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event) { struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev); struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; - struct acpi_table_nfit *nfit_saved; + struct acpi_nfit_header *nfit_saved; + union acpi_object *obj; struct device *dev = &adev->dev; acpi_status status; int ret; @@ -1808,12 +1830,19 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event) } nfit_saved = acpi_desc->nfit; - acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer; - ret = acpi_nfit_init(acpi_desc, buf.length); - if (!ret) { - /* Merge failed, restore old nfit, and exit */ - acpi_desc->nfit = nfit_saved; - dev_err(dev, "failed to merge updated NFIT\n"); + obj = buf.pointer; + if (obj->type == ACPI_TYPE_BUFFER) { + acpi_desc->nfit = + (struct acpi_nfit_header *)obj->buffer.pointer; + ret = acpi_nfit_init(acpi_desc, obj->buffer.length); + if (ret) { + /* Merge failed, restore old nfit, and exit */ + acpi_desc->nfit = nfit_saved; + dev_err(dev, "failed to merge updated NFIT\n"); + } + } else { + /* Bad _FIT, restore old nfit */ + dev_err(dev, "Invalid _FIT\n"); } kfree(buf.pointer); diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h index 2ea5c0797c8f..3d549a383659 100644 --- a/drivers/acpi/nfit.h +++ b/drivers/acpi/nfit.h @@ -96,7 +96,8 @@ struct nfit_mem { struct acpi_nfit_desc { struct nvdimm_bus_descriptor nd_desc; - struct acpi_table_nfit *nfit; + struct acpi_table_header acpi_header; + struct acpi_nfit_header *nfit; struct mutex spa_map_mutex; struct mutex init_mutex; struct list_head spa_maps; diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 850d7bf0c873..ae3fe4e64203 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -768,6 +768,13 @@ static void pci_acpi_root_add_resources(struct acpi_pci_root_info *info) else continue; + /* + * Some legacy x86 host bridge drivers use iomem_resource and + * ioport_resource as default resource pool, skip it. 
+ */ + if (res == root) + continue; + conflict = insert_resource_conflict(root, res); if (conflict) { dev_info(&info->bridge->dev, diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index e03b1ad25a90..167418e73445 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -1775,10 +1775,10 @@ int genpd_dev_pm_attach(struct device *dev) } pd = of_genpd_get_from_provider(&pd_args); + of_node_put(pd_args.np); if (IS_ERR(pd)) { dev_dbg(dev, "%s() failed to find PM domain: %ld\n", __func__, PTR_ERR(pd)); - of_node_put(dev->of_node); return -EPROBE_DEFER; } @@ -1796,7 +1796,6 @@ int genpd_dev_pm_attach(struct device *dev) if (ret < 0) { dev_err(dev, "failed to add to PM domain %s: %d", pd->name, ret); - of_node_put(dev->of_node); goto out; } diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c index e60dd12e23aa..1e937ac5f456 100644 --- a/drivers/base/power/domain_governor.c +++ b/drivers/base/power/domain_governor.c @@ -160,9 +160,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd) struct gpd_timing_data *td; s64 constraint_ns; - if (!pdd->dev->driver) - continue; - /* * Check if the device is allowed to be off long enough for the * domain to turn off and on (that's how much time it will diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index a28a562f7b7f..3457ac8c03e2 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -3810,7 +3810,6 @@ static int mtip_block_initialize(struct driver_data *dd) sector_t capacity; unsigned int index = 0; struct kobject *kobj; - unsigned char thd_name[16]; if (dd->disk) goto skip_create_disk; /* hw init done, before rebuild */ @@ -3958,10 +3957,9 @@ skip_create_disk: } start_service_thread: - sprintf(thd_name, "mtip_svc_thd_%02d", index); dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread, - dd, dd->numa_node, "%s", - thd_name); + dd, dd->numa_node, + "mtip_svc_thd_%02d", index); if (IS_ERR(dd->mtip_svc_handler)) { dev_err(&dd->pdev->dev, "service thread failed to start\n"); diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 6255d1c4bba4..0c3940ec5e62 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -8,6 +8,7 @@ #include <linux/slab.h> #include <linux/blk-mq.h> #include <linux/hrtimer.h> +#include <linux/lightnvm.h> struct nullb_cmd { struct list_head list; @@ -17,6 +18,7 @@ struct nullb_cmd { struct bio *bio; unsigned int tag; struct nullb_queue *nq; + struct hrtimer timer; }; struct nullb_queue { @@ -39,23 +41,14 @@ struct nullb { struct nullb_queue *queues; unsigned int nr_queues; + char disk_name[DISK_NAME_LEN]; }; static LIST_HEAD(nullb_list); static struct mutex lock; static int null_major; static int nullb_indexes; - -struct completion_queue { - struct llist_head list; - struct hrtimer timer; -}; - -/* - * These are per-cpu for now, they will need to be configured by the - * complete_queues parameter and appropriately mapped. 
- */ -static DEFINE_PER_CPU(struct completion_queue, completion_queues); +static struct kmem_cache *ppa_cache; enum { NULL_IRQ_NONE = 0, @@ -119,6 +112,10 @@ static int nr_devices = 2; module_param(nr_devices, int, S_IRUGO); MODULE_PARM_DESC(nr_devices, "Number of devices to register"); +static bool use_lightnvm; +module_param(use_lightnvm, bool, S_IRUGO); +MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device"); + static int irqmode = NULL_IRQ_SOFTIRQ; static int null_set_irqmode(const char *str, const struct kernel_param *kp) @@ -135,8 +132,8 @@ static const struct kernel_param_ops null_irqmode_param_ops = { device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO); MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer"); -static int completion_nsec = 10000; -module_param(completion_nsec, int, S_IRUGO); +static unsigned long completion_nsec = 10000; +module_param(completion_nsec, ulong, S_IRUGO); MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns"); static int hw_queue_depth = 64; @@ -173,6 +170,8 @@ static void free_cmd(struct nullb_cmd *cmd) put_tag(cmd->nq, cmd->tag); } +static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer); + static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq) { struct nullb_cmd *cmd; @@ -183,6 +182,11 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq) cmd = &nq->cmds[tag]; cmd->tag = tag; cmd->nq = nq; + if (irqmode == NULL_IRQ_TIMER) { + hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + cmd->timer.function = null_cmd_timer_expired; + } return cmd; } @@ -213,6 +217,8 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait) static void end_cmd(struct nullb_cmd *cmd) { + struct request_queue *q = NULL; + switch (queue_mode) { case NULL_Q_MQ: blk_mq_end_request(cmd->rq, 0); @@ -223,55 +229,37 @@ static void end_cmd(struct nullb_cmd *cmd) break; case NULL_Q_BIO: bio_endio(cmd->bio); - break; + goto free_cmd; } + if (cmd->rq) + q = cmd->rq->q; + + /* Restart queue if needed, as we are freeing a tag */ + if (q && !q->mq_ops && blk_queue_stopped(q)) { + unsigned long flags; + + spin_lock_irqsave(q->queue_lock, flags); + if (blk_queue_stopped(q)) + blk_start_queue(q); + spin_unlock_irqrestore(q->queue_lock, flags); + } +free_cmd: free_cmd(cmd); } static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer) { - struct completion_queue *cq; - struct llist_node *entry; - struct nullb_cmd *cmd; - - cq = &per_cpu(completion_queues, smp_processor_id()); - - while ((entry = llist_del_all(&cq->list)) != NULL) { - entry = llist_reverse_order(entry); - do { - struct request_queue *q = NULL; - - cmd = container_of(entry, struct nullb_cmd, ll_list); - entry = entry->next; - if (cmd->rq) - q = cmd->rq->q; - end_cmd(cmd); - - if (q && !q->mq_ops && blk_queue_stopped(q)) { - spin_lock(q->queue_lock); - if (blk_queue_stopped(q)) - blk_start_queue(q); - spin_unlock(q->queue_lock); - } - } while (entry); - } + end_cmd(container_of(timer, struct nullb_cmd, timer)); return HRTIMER_NORESTART; } static void null_cmd_end_timer(struct nullb_cmd *cmd) { - struct completion_queue *cq = &per_cpu(completion_queues, get_cpu()); - - cmd->ll_list.next = NULL; - if (llist_add(&cmd->ll_list, &cq->list)) { - ktime_t kt = ktime_set(0, completion_nsec); + ktime_t kt = ktime_set(0, completion_nsec); - hrtimer_start(&cq->timer, kt, HRTIMER_MODE_REL_PINNED); - } - - put_cpu(); + hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL); } 
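/*
 * For illustration: the pattern the hunks above switch to, reduced to a
 * self-contained sketch (assumes <linux/hrtimer.h>; the demo_* names are
 * placeholders, not driver symbols). Each command owns a one-shot hrtimer,
 * so no per-cpu llist draining or shared timer state is needed.
 */
struct demo_cmd {
	struct hrtimer timer;		/* embedded, one timer per command */
};

static enum hrtimer_restart demo_cmd_expired(struct hrtimer *t)
{
	struct demo_cmd *cmd = container_of(t, struct demo_cmd, timer);

	/* complete the command right here, then let the timer die */
	(void)cmd;
	return HRTIMER_NORESTART;
}

static void demo_cmd_complete_after(struct demo_cmd *cmd, unsigned long nsec)
{
	hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	cmd->timer.function = demo_cmd_expired;
	hrtimer_start(&cmd->timer, ktime_set(0, nsec), HRTIMER_MODE_REL);
}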
static void null_softirq_done_fn(struct request *rq) @@ -369,6 +357,10 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, { struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); + if (irqmode == NULL_IRQ_TIMER) { + hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + cmd->timer.function = null_cmd_timer_expired; + } cmd->rq = bd->rq; cmd->nq = hctx->driver_data; @@ -427,15 +419,156 @@ static void null_del_dev(struct nullb *nullb) { list_del_init(&nullb->list); - del_gendisk(nullb->disk); + if (use_lightnvm) + nvm_unregister(nullb->disk_name); + else + del_gendisk(nullb->disk); blk_cleanup_queue(nullb->q); if (queue_mode == NULL_Q_MQ) blk_mq_free_tag_set(&nullb->tag_set); - put_disk(nullb->disk); + if (!use_lightnvm) + put_disk(nullb->disk); cleanup_queues(nullb); kfree(nullb); } +#ifdef CONFIG_NVM + +static void null_lnvm_end_io(struct request *rq, int error) +{ + struct nvm_rq *rqd = rq->end_io_data; + struct nvm_dev *dev = rqd->dev; + + dev->mt->end_io(rqd, error); + + blk_put_request(rq); +} + +static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd) +{ + struct request *rq; + struct bio *bio = rqd->bio; + + rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0); + if (IS_ERR(rq)) + return -ENOMEM; + + rq->cmd_type = REQ_TYPE_DRV_PRIV; + rq->__sector = bio->bi_iter.bi_sector; + rq->ioprio = bio_prio(bio); + + if (bio_has_data(bio)) + rq->nr_phys_segments = bio_phys_segments(q, bio); + + rq->__data_len = bio->bi_iter.bi_size; + rq->bio = rq->biotail = bio; + + rq->end_io_data = rqd; + + blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io); + + return 0; +} + +static int null_lnvm_id(struct request_queue *q, struct nvm_id *id) +{ + sector_t size = gb * 1024 * 1024 * 1024ULL; + sector_t blksize; + struct nvm_id_group *grp; + + id->ver_id = 0x1; + id->vmnt = 0; + id->cgrps = 1; + id->cap = 0x3; + id->dom = 0x1; + + id->ppaf.blk_offset = 0; + id->ppaf.blk_len = 16; + id->ppaf.pg_offset = 16; + id->ppaf.pg_len = 16; + id->ppaf.sect_offset = 32; + id->ppaf.sect_len = 8; + id->ppaf.pln_offset = 40; + id->ppaf.pln_len = 8; + id->ppaf.lun_offset = 48; + id->ppaf.lun_len = 8; + id->ppaf.ch_offset = 56; + id->ppaf.ch_len = 8; + + do_div(size, bs); /* convert size to pages */ + do_div(size, 256); /* convert size to pgs per blk */ + grp = &id->groups[0]; + grp->mtype = 0; + grp->fmtype = 0; + grp->num_ch = 1; + grp->num_pg = 256; + blksize = size; + do_div(size, (1 << 16)); + grp->num_lun = size + 1; + do_div(blksize, grp->num_lun); + grp->num_blk = blksize; + grp->num_pln = 1; + + grp->fpg_sz = bs; + grp->csecs = bs; + grp->trdt = 25000; + grp->trdm = 25000; + grp->tprt = 500000; + grp->tprm = 500000; + grp->tbet = 1500000; + grp->tbem = 1500000; + grp->mpos = 0x010101; /* single plane rwe */ + grp->cpar = hw_queue_depth; + + return 0; +} + +static void *null_lnvm_create_dma_pool(struct request_queue *q, char *name) +{ + mempool_t *virtmem_pool; + + virtmem_pool = mempool_create_slab_pool(64, ppa_cache); + if (!virtmem_pool) { + pr_err("null_blk: Unable to create virtual memory pool\n"); + return NULL; + } + + return virtmem_pool; +} + +static void null_lnvm_destroy_dma_pool(void *pool) +{ + mempool_destroy(pool); +} + +static void *null_lnvm_dev_dma_alloc(struct request_queue *q, void *pool, + gfp_t mem_flags, dma_addr_t *dma_handler) +{ + return mempool_alloc(pool, mem_flags); +} + +static void null_lnvm_dev_dma_free(void *pool, void *entry, + dma_addr_t dma_handler) +{ + mempool_free(entry, pool); +} + +static struct nvm_dev_ops null_lnvm_dev_ops = { + .identity
= null_lnvm_id, + .submit_io = null_lnvm_submit_io, + + .create_dma_pool = null_lnvm_create_dma_pool, + .destroy_dma_pool = null_lnvm_destroy_dma_pool, + .dev_dma_alloc = null_lnvm_dev_dma_alloc, + .dev_dma_free = null_lnvm_dev_dma_free, + + /* Simulate nvme protocol restriction */ + .max_phys_sect = 64, +}; +#else +static struct nvm_dev_ops null_lnvm_dev_ops; +#endif /* CONFIG_NVM */ + static int null_open(struct block_device *bdev, fmode_t mode) { return 0; @@ -575,11 +708,6 @@ static int null_add_dev(void) queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q); - disk = nullb->disk = alloc_disk_node(1, home_node); - if (!disk) { - rv = -ENOMEM; - goto out_cleanup_blk_queue; - } mutex_lock(&lock); list_add_tail(&nullb->list, &nullb_list); @@ -589,6 +717,21 @@ static int null_add_dev(void) blk_queue_logical_block_size(nullb->q, bs); blk_queue_physical_block_size(nullb->q, bs); + sprintf(nullb->disk_name, "nullb%d", nullb->index); + + if (use_lightnvm) { + rv = nvm_register(nullb->q, nullb->disk_name, + &null_lnvm_dev_ops); + if (rv) + goto out_cleanup_blk_queue; + goto done; + } + + disk = nullb->disk = alloc_disk_node(1, home_node); + if (!disk) { + rv = -ENOMEM; + goto out_cleanup_lightnvm; + } size = gb * 1024 * 1024 * 1024ULL; set_capacity(disk, size >> 9); @@ -598,10 +741,15 @@ static int null_add_dev(void) disk->fops = &null_fops; disk->private_data = nullb; disk->queue = nullb->q; - sprintf(disk->disk_name, "nullb%d", nullb->index); + strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); + add_disk(disk); +done: return 0; +out_cleanup_lightnvm: + if (use_lightnvm) + nvm_unregister(nullb->disk_name); out_cleanup_blk_queue: blk_cleanup_queue(nullb->q); out_cleanup_tags: @@ -625,6 +773,18 @@ static int __init null_init(void) bs = PAGE_SIZE; } + if (use_lightnvm && bs != 4096) { + pr_warn("null_blk: LightNVM only supports 4k block size\n"); + pr_warn("null_blk: defaults block size to 4k\n"); + bs = 4096; + } + + if (use_lightnvm && queue_mode != NULL_Q_MQ) { + pr_warn("null_blk: LightNVM only supported for blk-mq\n"); + pr_warn("null_blk: defaults queue mode to blk-mq\n"); + queue_mode = NULL_Q_MQ; + } + if (queue_mode == NULL_Q_MQ && use_per_node_hctx) { if (submit_queues < nr_online_nodes) { pr_warn("null_blk: submit_queues param is set to %u.", @@ -638,32 +798,31 @@ static int __init null_init(void) mutex_init(&lock); - /* Initialize a separate list for each CPU for issuing softirqs */ - for_each_possible_cpu(i) { - struct completion_queue *cq = &per_cpu(completion_queues, i); - - init_llist_head(&cq->list); - - if (irqmode != NULL_IRQ_TIMER) - continue; - - hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - cq->timer.function = null_cmd_timer_expired; - } - null_major = register_blkdev(0, "nullb"); if (null_major < 0) return null_major; + if (use_lightnvm) { + ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64), + 0, 0, NULL); + if (!ppa_cache) { + pr_err("null_blk: unable to create ppa cache\n"); + return -ENOMEM; + } + } + for (i = 0; i < nr_devices; i++) { if (null_add_dev()) { unregister_blkdev(null_major, "nullb"); - return -EINVAL; + goto err_ppa; } } pr_info("null: module loaded\n"); return 0; +err_ppa: + kmem_cache_destroy(ppa_cache); + return -EINVAL; } static void __exit null_exit(void) @@ -678,6 +837,8 @@ static void __exit null_exit(void) null_del_dev(nullb); } mutex_unlock(&lock); + + kmem_cache_destroy(ppa_cache); } module_init(null_init); diff --git a/drivers/block/rbd.c 
b/drivers/block/rbd.c index 235708c7c46e..81ea69fee7ca 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3442,6 +3442,7 @@ static void rbd_queue_workfn(struct work_struct *work) goto err_rq; } img_request->rq = rq; + snapc = NULL; /* img_request consumes a ref */ if (op_type == OBJ_OP_DISCARD) result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA, diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c index 9f1856948758..bf500e0e7362 100644 --- a/drivers/bus/omap-ocp2scp.c +++ b/drivers/bus/omap-ocp2scp.c @@ -117,7 +117,7 @@ static struct platform_driver omap_ocp2scp_driver = { module_platform_driver(omap_ocp2scp_driver); -MODULE_ALIAS("platform: omap-ocp2scp"); +MODULE_ALIAS("platform:omap-ocp2scp"); MODULE_AUTHOR("Texas Instruments Inc."); MODULE_DESCRIPTION("OMAP OCP2SCP driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 8014c2307332..235a1ba73d92 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -202,7 +202,7 @@ config ARM_SA1110_CPUFREQ config ARM_SCPI_CPUFREQ tristate "SCPI based CPUfreq driver" - depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL + depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI help This adds the CPUfreq driver support for ARM big.LITTLE platforms using SCPI protocol for CPU power management. diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index e8cb334094b0..7c0bdfb1a2ca 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -98,10 +98,11 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) policy->max = cpu->perf_caps.highest_perf; policy->cpuinfo.min_freq = policy->min; policy->cpuinfo.max_freq = policy->max; + policy->shared_type = cpu->shared_type; if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) cpumask_copy(policy->cpus, cpu->shared_cpu_map); - else { + else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) { /* Support only SW_ANY for now. */ pr_debug("Unsupported CPU co-ord type\n"); return -EFAULT; diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 7c48e7316d91..8412ce5f93a7 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -976,10 +976,14 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy) new_policy.governor = gov; - /* Use the default policy if its valid. */ - if (cpufreq_driver->setpolicy) - cpufreq_parse_governor(gov->name, &new_policy.policy, NULL); - + /* Use the default policy if there is no last_policy. 
*/ + if (cpufreq_driver->setpolicy) { + if (policy->last_policy) + new_policy.policy = policy->last_policy; + else + cpufreq_parse_governor(gov->name, &new_policy.policy, + NULL); + } /* set default policy */ return cpufreq_set_policy(policy, &new_policy); } @@ -1330,6 +1334,8 @@ static void cpufreq_offline_prepare(unsigned int cpu) if (has_target()) strncpy(policy->last_governor, policy->governor->name, CPUFREQ_NAME_LEN); + else + policy->last_policy = policy->policy; } else if (cpu == policy->cpu) { /* Nominate new CPU */ policy->cpu = cpumask_any(policy->cpus); @@ -1401,13 +1407,10 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) } cpumask_clear_cpu(cpu, policy->real_cpus); + remove_cpu_dev_symlink(policy, cpu); - if (cpumask_empty(policy->real_cpus)) { + if (cpumask_empty(policy->real_cpus)) cpufreq_policy_free(policy, true); - return; - } - - remove_cpu_dev_symlink(policy, cpu); } static void handle_update(struct work_struct *work) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 001a532e342e..4d07cbd2b23c 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1101,6 +1101,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) policy->max >= policy->cpuinfo.max_freq) { pr_debug("intel_pstate: set performance\n"); limits = &performance_limits; + if (hwp_active) + intel_pstate_hwp_set(); return 0; } @@ -1108,7 +1110,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) limits = &powersave_limits; limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq; limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100); - limits->max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq; + limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100, + policy->cpuinfo.max_freq); limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0 , 100); /* Normalize user input to [min_policy_pct, max_policy_pct] */ @@ -1120,6 +1123,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) limits->max_sysfs_pct); limits->max_perf_pct = max(limits->min_policy_pct, limits->max_perf_pct); + limits->max_perf = round_up(limits->max_perf, 8); /* Make sure min_perf_pct <= max_perf_pct */ limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct); diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c index 73ef49922788..7038f364acb5 100644 --- a/drivers/crypto/nx/nx-aes-ccm.c +++ b/drivers/crypto/nx/nx-aes-ccm.c @@ -409,7 +409,7 @@ static int ccm_nx_decrypt(struct aead_request *req, processed += to_process; } while (processed < nbytes); - rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag, + rc = crypto_memneq(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag, authsize) ? -EBADMSG : 0; out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c index eee624f589b6..abd465f479c4 100644 --- a/drivers/crypto/nx/nx-aes-gcm.c +++ b/drivers/crypto/nx/nx-aes-gcm.c @@ -21,6 +21,7 @@ #include <crypto/internal/aead.h> #include <crypto/aes.h> +#include <crypto/algapi.h> #include <crypto/scatterwalk.h> #include <linux/module.h> #include <linux/types.h> @@ -418,7 +419,7 @@ mac: itag, req->src, req->assoclen + nbytes, crypto_aead_authsize(crypto_aead_reqtfm(req)), SCATTERWALK_FROM_SG); - rc = memcmp(itag, otag, + rc = crypto_memneq(itag, otag, crypto_aead_authsize(crypto_aead_reqtfm(req))) ? 
-EBADMSG : 0; } diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 46f531e19ccf..b6f9f42e2985 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -977,7 +977,7 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev, } else oicv = (char *)&edesc->link_tbl[0]; - err = memcmp(oicv, icv, authsize) ? -EBADMSG : 0; + err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0; } kfree(edesc); diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c index 6ed7c0fb3378..6b186829087c 100644 --- a/drivers/gpio/gpio-74xx-mmio.c +++ b/drivers/gpio/gpio-74xx-mmio.c @@ -113,13 +113,16 @@ static int mmio_74xx_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) static int mmio_74xx_gpio_probe(struct platform_device *pdev) { - const struct of_device_id *of_id = - of_match_device(mmio_74xx_gpio_ids, &pdev->dev); + const struct of_device_id *of_id; struct mmio_74xx_gpio_priv *priv; struct resource *res; void __iomem *dat; int err; + of_id = of_match_device(mmio_74xx_gpio_ids, &pdev->dev); + if (!of_id) + return -ENODEV; + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index 56d2d026e62e..f7fbb46d5d79 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -1122,8 +1122,6 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc) /* MPUIO is a bit different, reading IRQ status clears it */ if (bank->is_mpuio) { irqc->irq_ack = dummy_irq_chip.irq_ack; - irqc->irq_mask = irq_gc_mask_set_bit; - irqc->irq_unmask = irq_gc_mask_clr_bit; if (!bank->regs->wkup_en) irqc->irq_set_wake = NULL; } diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c index 171a6389f9ce..52b447c071cb 100644 --- a/drivers/gpio/gpio-palmas.c +++ b/drivers/gpio/gpio-palmas.c @@ -167,6 +167,8 @@ static int palmas_gpio_probe(struct platform_device *pdev) const struct palmas_device_data *dev_data; match = of_match_device(of_palmas_gpio_match, &pdev->dev); + if (!match) + return -ENODEV; dev_data = match->data; if (!dev_data) dev_data = &palmas_dev_data; diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c index 045a952576c7..7b25fdf64802 100644 --- a/drivers/gpio/gpio-syscon.c +++ b/drivers/gpio/gpio-syscon.c @@ -187,11 +187,15 @@ MODULE_DEVICE_TABLE(of, syscon_gpio_ids); static int syscon_gpio_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - const struct of_device_id *of_id = of_match_device(syscon_gpio_ids, dev); + const struct of_device_id *of_id; struct syscon_gpio_priv *priv; struct device_node *np = dev->of_node; int ret; + of_id = of_match_device(syscon_gpio_ids, dev); + if (!of_id) + return -ENODEV; + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c index 027e5f47dd28..896bf29776b0 100644 --- a/drivers/gpio/gpio-tegra.c +++ b/drivers/gpio/gpio-tegra.c @@ -375,6 +375,60 @@ static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable) } #endif +#ifdef CONFIG_DEBUG_FS + +#include <linux/debugfs.h> +#include <linux/seq_file.h> + +static int dbg_gpio_show(struct seq_file *s, void *unused) +{ + int i; + int j; + + for (i = 0; i < tegra_gpio_bank_count; i++) { + for (j = 0; j < 4; j++) { + int gpio = tegra_gpio_compose(i, j, 0); + seq_printf(s, + "%d:%d %02x %02x %02x %02x %02x %02x %06x\n", + i, j, + tegra_gpio_readl(GPIO_CNF(gpio)), + tegra_gpio_readl(GPIO_OE(gpio)), + 
tegra_gpio_readl(GPIO_OUT(gpio)), + tegra_gpio_readl(GPIO_IN(gpio)), + tegra_gpio_readl(GPIO_INT_STA(gpio)), + tegra_gpio_readl(GPIO_INT_ENB(gpio)), + tegra_gpio_readl(GPIO_INT_LVL(gpio))); + } + } + return 0; +} + +static int dbg_gpio_open(struct inode *inode, struct file *file) +{ + return single_open(file, dbg_gpio_show, &inode->i_private); +} + +static const struct file_operations debug_fops = { + .open = dbg_gpio_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static void tegra_gpio_debuginit(void) +{ + (void) debugfs_create_file("tegra_gpio", S_IRUGO, + NULL, NULL, &debug_fops); +} + +#else + +static inline void tegra_gpio_debuginit(void) +{ +} + +#endif + static struct irq_chip tegra_gpio_irq_chip = { .name = "GPIO", .irq_ack = tegra_gpio_irq_ack, @@ -519,6 +573,8 @@ static int tegra_gpio_probe(struct platform_device *pdev) spin_lock_init(&bank->lvl_lock[j]); } + tegra_gpio_debuginit(); + return 0; } @@ -536,52 +592,3 @@ static int __init tegra_gpio_init(void) return platform_driver_register(&tegra_gpio_driver); } postcore_initcall(tegra_gpio_init); - -#ifdef CONFIG_DEBUG_FS - -#include <linux/debugfs.h> -#include <linux/seq_file.h> - -static int dbg_gpio_show(struct seq_file *s, void *unused) -{ - int i; - int j; - - for (i = 0; i < tegra_gpio_bank_count; i++) { - for (j = 0; j < 4; j++) { - int gpio = tegra_gpio_compose(i, j, 0); - seq_printf(s, - "%d:%d %02x %02x %02x %02x %02x %02x %06x\n", - i, j, - tegra_gpio_readl(GPIO_CNF(gpio)), - tegra_gpio_readl(GPIO_OE(gpio)), - tegra_gpio_readl(GPIO_OUT(gpio)), - tegra_gpio_readl(GPIO_IN(gpio)), - tegra_gpio_readl(GPIO_INT_STA(gpio)), - tegra_gpio_readl(GPIO_INT_ENB(gpio)), - tegra_gpio_readl(GPIO_INT_LVL(gpio))); - } - } - return 0; -} - -static int dbg_gpio_open(struct inode *inode, struct file *file) -{ - return single_open(file, dbg_gpio_show, &inode->i_private); -} - -static const struct file_operations debug_fops = { - .open = dbg_gpio_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static int __init tegra_gpio_debuginit(void) -{ - (void) debugfs_create_file("tegra_gpio", S_IRUGO, - NULL, NULL, &debug_fops); - return 0; -} -late_initcall(tegra_gpio_debuginit); -#endif diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index a18f00fc1bb8..2a91f3287e3b 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -233,7 +233,7 @@ static struct gpio_desc *gpio_name_to_desc(const char * const name) for (i = 0; i != chip->ngpio; ++i) { struct gpio_desc *gpio = &chip->desc[i]; - if (!gpio->name) + if (!gpio->name || !name) continue; if (!strcmp(gpio->name, name)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 306f75700bf8..5a5f04d0902d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -496,6 +496,7 @@ struct amdgpu_bo_va_mapping { /* bo virtual addresses in a specific vm */ struct amdgpu_bo_va { + struct mutex mutex; /* protected by bo being reserved */ struct list_head bo_list; struct fence *last_pt_update; @@ -538,6 +539,7 @@ struct amdgpu_bo { /* Constant after initialization */ struct amdgpu_device *adev; struct drm_gem_object gem_base; + struct amdgpu_bo *parent; struct ttm_bo_kmap_obj dma_buf_vmap; pid_t pid; @@ -928,8 +930,6 @@ struct amdgpu_vm_id { }; struct amdgpu_vm { - struct mutex mutex; - struct rb_root va; /* protecting invalidated */ @@ -956,6 +956,8 @@ struct amdgpu_vm { struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS]; /* for interval tree */ 
spinlock_t it_lock; + /* protecting freed */ + spinlock_t freed_lock; }; struct amdgpu_vm_manager { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 3afcf0237c25..4f352ec9dec4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -222,6 +222,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) } p->uf.bo = gem_to_amdgpu_bo(gobj); + amdgpu_bo_ref(p->uf.bo); + drm_gem_object_unreference_unlocked(gobj); p->uf.offset = fence_data->offset; } else { ret = -EINVAL; @@ -487,7 +489,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo amdgpu_ib_free(parser->adev, &parser->ibs[i]); kfree(parser->ibs); if (parser->uf.bo) - drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base); + amdgpu_bo_unref(&parser->uf.bo); } static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, @@ -776,7 +778,7 @@ static int amdgpu_cs_free_job(struct amdgpu_job *job) amdgpu_ib_free(job->adev, &job->ibs[i]); kfree(job->ibs); if (job->uf.bo) - drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base); + amdgpu_bo_unref(&job->uf.bo); return 0; } @@ -784,8 +786,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { struct amdgpu_device *adev = dev->dev_private; union drm_amdgpu_cs *cs = data; - struct amdgpu_fpriv *fpriv = filp->driver_priv; - struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_cs_parser parser = {}; bool reserved_buffers = false; int i, r; @@ -803,7 +803,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) r = amdgpu_cs_handle_lockup(adev, r); return r; } - mutex_lock(&vm->mutex); r = amdgpu_cs_parser_relocs(&parser); if (r == -ENOMEM) DRM_ERROR("Not enough memory for command submission!\n"); @@ -888,7 +887,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) out: amdgpu_cs_parser_fini(&parser, r, reserved_buffers); - mutex_unlock(&vm->mutex); r = amdgpu_cs_handle_lockup(adev, r); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index e173a5a02f0d..acd066d0a805 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -73,6 +73,8 @@ static void amdgpu_flip_work_func(struct work_struct *__work) struct drm_crtc *crtc = &amdgpuCrtc->base; unsigned long flags; unsigned i; + int vpos, hpos, stat, min_udelay; + struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id]; amdgpu_flip_wait_fence(adev, &work->excl); for (i = 0; i < work->shared_count; ++i) @@ -81,6 +83,41 @@ static void amdgpu_flip_work_func(struct work_struct *__work) /* We borrow the event spin lock for protecting flip_status */ spin_lock_irqsave(&crtc->dev->event_lock, flags); + /* If this happens to execute within the "virtually extended" vblank + * interval before the start of the real vblank interval then it needs + * to delay programming the mmio flip until the real vblank is entered. + * This prevents completing a flip too early due to the way we fudge + * our vblank counter and vblank timestamps in order to work around the + * problem that the hw fires vblank interrupts before actual start of + * vblank (when line buffer refilling is done for a frame). It + * complements the fudging logic in amdgpu_get_crtc_scanoutpos() for + * timestamping and amdgpu_get_vblank_counter_kms() for vblank counts. 
+ * + * In practice this won't execute very often unless on very fast + * machines because the time window for this to happen is very small. + */ + for (;;) { + /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank + * start in hpos, and to the "fudged earlier" vblank start in + * vpos. + */ + stat = amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, + GET_DISTANCE_TO_VBLANKSTART, + &vpos, &hpos, NULL, NULL, + &crtc->hwmode); + + if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) != + (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) || + !(vpos >= 0 && hpos <= 0)) + break; + + /* Sleep at least until estimated real start of hw vblank */ + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5); + usleep_range(min_udelay, 2 * min_udelay); + spin_lock_irqsave(&crtc->dev->event_lock, flags); + }; + /* do the flip (mmio) */ adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base); /* set the flip status */ @@ -109,7 +146,7 @@ static void amdgpu_unpin_work_func(struct work_struct *__work) } else DRM_ERROR("failed to reserve buffer after flip\n"); - drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); + amdgpu_bo_unref(&work->old_rbo); kfree(work->shared); kfree(work); } @@ -148,8 +185,8 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, obj = old_amdgpu_fb->obj; /* take a reference to the old object */ - drm_gem_object_reference(obj); work->old_rbo = gem_to_amdgpu_bo(obj); + amdgpu_bo_ref(work->old_rbo); new_amdgpu_fb = to_amdgpu_framebuffer(fb); obj = new_amdgpu_fb->obj; @@ -222,7 +259,7 @@ pflip_cleanup: amdgpu_bo_unreserve(new_rbo); cleanup: - drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); + amdgpu_bo_unref(&work->old_rbo); fence_put(work->excl); for (i = 0; i < work->shared_count; ++i) fence_put(work->shared[i]); @@ -481,7 +518,7 @@ static const struct drm_framebuffer_funcs amdgpu_fb_funcs = { int amdgpu_framebuffer_init(struct drm_device *dev, struct amdgpu_framebuffer *rfb, - struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj) { int ret; @@ -498,7 +535,7 @@ amdgpu_framebuffer_init(struct drm_device *dev, static struct drm_framebuffer * amdgpu_user_framebuffer_create(struct drm_device *dev, struct drm_file *file_priv, - struct drm_mode_fb_cmd2 *mode_cmd) + const struct drm_mode_fb_cmd2 *mode_cmd) { struct drm_gem_object *obj; struct amdgpu_framebuffer *amdgpu_fb; @@ -712,6 +749,15 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc, * \param dev Device to query. * \param pipe Crtc to query. * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0). + * For driver internal use only also supports these flags: + * + * USE_REAL_VBLANKSTART to use the real start of vblank instead + * of a fudged earlier start of vblank. + * + * GET_DISTANCE_TO_VBLANKSTART to return distance to the + * fudged earlier start of vblank in *vpos and the distance + * to true start of vblank in *hpos. + * * \param *vpos Location where vertical scanout position should be stored. * \param *hpos Location where horizontal scanout position should go. * \param *stime Target location for timestamp taken immediately before @@ -776,10 +822,40 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, vbl_end = 0; } + /* Called from driver internal vblank counter query code? 
*/ + if (flags & GET_DISTANCE_TO_VBLANKSTART) { + /* Caller wants distance from real vbl_start in *hpos */ + *hpos = *vpos - vbl_start; + } + + /* Fudge vblank to start a few scanlines earlier to handle the + * problem that vblank irqs fire a few scanlines before start + * of vblank. Some driver internal callers need the true vblank + * start to be used and signal this via the USE_REAL_VBLANKSTART flag. + * + * The cause of the "early" vblank irq is that the irq is triggered + * by the line buffer logic when the line buffer read position enters + * the vblank, whereas our crtc scanout position naturally lags the + * line buffer read position. + */ + if (!(flags & USE_REAL_VBLANKSTART)) + vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines; + /* Test scanout position against vblank region. */ if ((*vpos < vbl_start) && (*vpos >= vbl_end)) in_vbl = false; + /* In vblank? */ + if (in_vbl) + ret |= DRM_SCANOUTPOS_IN_VBLANK; + + /* Called from driver internal vblank counter query code? */ + if (flags & GET_DISTANCE_TO_VBLANKSTART) { + /* Caller wants distance from fudged earlier vbl_start */ + *vpos -= vbl_start; + return ret; + } + /* Check if inside vblank area and apply corrective offsets: * vpos will then be >=0 in video scanout area, but negative * within vblank area, counting down the number of lines until @@ -795,32 +871,6 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, /* Correct for shifted end of vbl at vbl_end. */ *vpos = *vpos - vbl_end; - /* In vblank? */ - if (in_vbl) - ret |= DRM_SCANOUTPOS_IN_VBLANK; - - /* Is vpos outside nominal vblank area, but less than - * 1/100 of a frame height away from start of vblank? - * If so, assume this isn't a massively delayed vblank - * interrupt, but a vblank interrupt that fired a few - * microseconds before true start of vblank. Compensate - * by adding a full frame duration to the final timestamp. - * Happens, e.g., on ATI R500, R600. - * - * We only do this if DRM_CALLED_FROM_VBLIRQ. - */ - if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) { - vbl_start = mode->crtc_vdisplay; - vtotal = mode->crtc_vtotal; - - if (vbl_start - *vpos < vtotal / 100) { - *vpos -= vtotal; - - /* Signal this correction as "applied". 
*/ - ret |= 0x8; - } - } - return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index 093a8c618931..6fcbbcc2e99e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -45,7 +45,6 @@ struct amdgpu_fbdev { struct drm_fb_helper helper; struct amdgpu_framebuffer rfb; - struct list_head fbdev_list; struct amdgpu_device *adev; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 00c5b580f56c..f6ea4b43a60c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -115,12 +115,9 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_bo_va *bo_va; int r; - mutex_lock(&vm->mutex); r = amdgpu_bo_reserve(rbo, false); - if (r) { - mutex_unlock(&vm->mutex); + if (r) return r; - } bo_va = amdgpu_vm_bo_find(vm, rbo); if (!bo_va) { @@ -129,7 +126,6 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri ++bo_va->ref_count; } amdgpu_bo_unreserve(rbo); - mutex_unlock(&vm->mutex); return 0; } @@ -142,10 +138,8 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_bo_va *bo_va; int r; - mutex_lock(&vm->mutex); r = amdgpu_bo_reserve(rbo, true); if (r) { - mutex_unlock(&vm->mutex); dev_err(adev->dev, "leaking bo va because " "we fail to reserve bo (%d)\n", r); return; @@ -157,7 +151,6 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, } } amdgpu_bo_unreserve(rbo); - mutex_unlock(&vm->mutex); } static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r) @@ -242,8 +235,9 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, AMDGPU_GEM_USERPTR_REGISTER)) return -EINVAL; - if (!(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) || - !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) { + if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && ( + !(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) || + !(args->flags & AMDGPU_GEM_USERPTR_REGISTER))) { /* if we want to write to it we must require anonymous memory and install a MMU notifier */ @@ -553,7 +547,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, gobj = drm_gem_object_lookup(dev, filp, args->handle); if (gobj == NULL) return -ENOENT; - mutex_lock(&fpriv->vm.mutex); rbo = gem_to_amdgpu_bo(gobj); INIT_LIST_HEAD(&list); INIT_LIST_HEAD(&duplicates); @@ -568,7 +561,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, } r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); if (r) { - mutex_unlock(&fpriv->vm.mutex); drm_gem_object_unreference_unlocked(gobj); return r; } @@ -577,7 +569,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, if (!bo_va) { ttm_eu_backoff_reservation(&ticket, &list); drm_gem_object_unreference_unlocked(gobj); - mutex_unlock(&fpriv->vm.mutex); return -ENOENT; } @@ -602,7 +593,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, ttm_eu_backoff_reservation(&ticket, &list); if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) amdgpu_gem_va_update_vm(adev, bo_va, args->operation); - mutex_unlock(&fpriv->vm.mutex); + drm_gem_object_unreference_unlocked(gobj); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 1618e2294a16..e23843f4d877 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -611,13 +611,59 @@ void 
amdgpu_driver_preclose_kms(struct drm_device *dev, u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe) { struct amdgpu_device *adev = dev->dev_private; + int vpos, hpos, stat; + u32 count; if (pipe >= adev->mode_info.num_crtc) { DRM_ERROR("Invalid crtc %u\n", pipe); return -EINVAL; } - return amdgpu_display_vblank_get_counter(adev, pipe); + /* The hw increments its frame counter at start of vsync, not at start + * of vblank, as is required by DRM core vblank counter handling. + * Cook the hw count here to make it appear to the caller as if it + * incremented at start of vblank. We measure distance to start of + * vblank in vpos. vpos therefore will be >= 0 between start of vblank + * and start of vsync, so vpos >= 0 means to bump the hw frame counter + * result by 1 to give the proper appearance to caller. + */ + if (adev->mode_info.crtcs[pipe]) { + /* Repeat readout if needed to provide stable result if + * we cross start of vsync during the queries. + */ + do { + count = amdgpu_display_vblank_get_counter(adev, pipe); + /* Ask amdgpu_get_crtc_scanoutpos to return vpos as + * distance to start of vblank, instead of regular + * vertical scanout pos. + */ + stat = amdgpu_get_crtc_scanoutpos( + dev, pipe, GET_DISTANCE_TO_VBLANKSTART, + &vpos, &hpos, NULL, NULL, + &adev->mode_info.crtcs[pipe]->base.hwmode); + } while (count != amdgpu_display_vblank_get_counter(adev, pipe)); + + if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) != + (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) { + DRM_DEBUG_VBL("Query failed! stat %d\n", stat); + } else { + DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n", + pipe, vpos); + + /* Bump counter if we are at >= leading edge of vblank, + * but before vsync where vpos would turn negative and + * the hw counter really increments. + */ + if (vpos >= 0) + count++; + } + } else { + /* Fallback to use value as is. */ + count = amdgpu_display_vblank_get_counter(adev, pipe); + DRM_DEBUG_VBL("NULL mode info! 
Returned count may be wrong.\n"); + } + + return count; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index b62c1710cab6..a53d756672fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -407,6 +407,7 @@ struct amdgpu_crtc { u32 line_time; u32 wm_low; u32 wm_high; + u32 lb_vblank_lead_lines; struct drm_display_mode hw_mode; }; @@ -528,6 +529,10 @@ struct amdgpu_framebuffer { #define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \ ((em) == ATOM_ENCODER_MODE_DP_MST)) +/* Driver internal use only flags of amdgpu_get_crtc_scanoutpos() */ +#define USE_REAL_VBLANKSTART (1 << 30) +#define GET_DISTANCE_TO_VBLANKSTART (1 << 31) + void amdgpu_link_encoder_connector(struct drm_device *dev); struct drm_connector * @@ -551,7 +556,7 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, int amdgpu_framebuffer_init(struct drm_device *dev, struct amdgpu_framebuffer *rfb, - struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj); int amdgpufb_remove(struct drm_device *dev, struct drm_framebuffer *fb); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 0d524384ff79..c3ce103b6a33 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -100,6 +100,7 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) list_del_init(&bo->list); mutex_unlock(&bo->adev->gem.mutex); drm_gem_object_release(&bo->gem_base); + amdgpu_bo_unref(&bo->parent); kfree(bo->metadata); kfree(bo); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index d4bac5f49939..8a1752ff3d8e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -587,9 +587,13 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem); int r; - if (gtt->userptr) - amdgpu_ttm_tt_pin_userptr(ttm); - + if (gtt->userptr) { + r = amdgpu_ttm_tt_pin_userptr(ttm); + if (r) { + DRM_ERROR("failed to pin userptr\n"); + return r; + } + } gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); if (!ttm->num_pages) { WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", @@ -797,11 +801,12 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, if (mem && mem->mem_type != TTM_PL_SYSTEM) flags |= AMDGPU_PTE_VALID; - if (mem && mem->mem_type == TTM_PL_TT) + if (mem && mem->mem_type == TTM_PL_TT) { flags |= AMDGPU_PTE_SYSTEM; - if (!ttm || ttm->caching_state == tt_cached) - flags |= AMDGPU_PTE_SNOOPED; + if (ttm->caching_state == tt_cached) + flags |= AMDGPU_PTE_SNOOPED; + } if (adev->asic_type >= CHIP_TOPAZ) flags |= AMDGPU_PTE_EXECUTABLE; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 03f0c3bae516..a745eeeb5d82 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -392,7 +392,10 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */ ib->ptr[ib->length_dw++] = handle; - ib->ptr[ib->length_dw++] = 0x00000030; /* len */ + if ((ring->adev->vce.fw_version >> 24) >= 52) + ib->ptr[ib->length_dw++] = 0x00000040; /* len */ + else + ib->ptr[ib->length_dw++] = 0x00000030; /* len */ ib->ptr[ib->length_dw++] = 0x01000001; 
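/*
 * The (ring->adev->vce.fw_version >> 24) >= 52 tests above and below
 * gate the longer create command (len 0x40, with four extra zero
 * dwords) on the VCE firmware release. The top byte of the packed
 * version word is treated as the major release here; a hypothetical
 * helper spelling that out (vce_fw_major() is not an existing kernel
 * function):
 */
static inline u32 vce_fw_major(struct amdgpu_device *adev)
{
	return adev->vce.fw_version >> 24;	/* major release in bits 31:24 */
}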
/* create cmd */ ib->ptr[ib->length_dw++] = 0x00000000; ib->ptr[ib->length_dw++] = 0x00000042; @@ -404,6 +407,12 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, ib->ptr[ib->length_dw++] = 0x00000100; ib->ptr[ib->length_dw++] = 0x0000000c; ib->ptr[ib->length_dw++] = 0x00000000; + if ((ring->adev->vce.fw_version >> 24) >= 52) { + ib->ptr[ib->length_dw++] = 0x00000000; + ib->ptr[ib->length_dw++] = 0x00000000; + ib->ptr[ib->length_dw++] = 0x00000000; + ib->ptr[ib->length_dw++] = 0x00000000; + } ib->ptr[ib->length_dw++] = 0x00000014; /* len */ ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 159ce54bbd8d..b53d273eb7a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -885,17 +885,21 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping *mapping; int r; + spin_lock(&vm->freed_lock); while (!list_empty(&vm->freed)) { mapping = list_first_entry(&vm->freed, struct amdgpu_bo_va_mapping, list); list_del(&mapping->list); - + spin_unlock(&vm->freed_lock); r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL); kfree(mapping); if (r) return r; + spin_lock(&vm->freed_lock); } + spin_unlock(&vm->freed_lock); + return 0; } @@ -922,8 +926,9 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va, vm_status); spin_unlock(&vm->status_lock); - + mutex_lock(&bo_va->mutex); r = amdgpu_vm_bo_update(adev, bo_va, NULL); + mutex_unlock(&bo_va->mutex); if (r) return r; @@ -967,7 +972,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, INIT_LIST_HEAD(&bo_va->valids); INIT_LIST_HEAD(&bo_va->invalids); INIT_LIST_HEAD(&bo_va->vm_status); - + mutex_init(&bo_va->mutex); list_add_tail(&bo_va->bo_list, &bo->va); return bo_va; @@ -1045,7 +1050,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, mapping->offset = offset; mapping->flags = flags; + mutex_lock(&bo_va->mutex); list_add(&mapping->list, &bo_va->invalids); + mutex_unlock(&bo_va->mutex); spin_lock(&vm->it_lock); interval_tree_insert(&mapping->it, &vm->va); spin_unlock(&vm->it_lock); @@ -1076,6 +1083,11 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, if (r) goto error_free; + /* Keep a reference to the page table to avoid freeing + * them up in the wrong order. 
+ */ + pt->parent = amdgpu_bo_ref(vm->page_directory); + r = amdgpu_vm_clear_bo(adev, pt); if (r) { amdgpu_bo_unref(&pt); @@ -1121,7 +1133,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, bool valid = true; saddr /= AMDGPU_GPU_PAGE_SIZE; - + mutex_lock(&bo_va->mutex); list_for_each_entry(mapping, &bo_va->valids, list) { if (mapping->it.start == saddr) break; @@ -1135,20 +1147,25 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, break; } - if (&mapping->list == &bo_va->invalids) + if (&mapping->list == &bo_va->invalids) { + mutex_unlock(&bo_va->mutex); return -ENOENT; + } } - + mutex_unlock(&bo_va->mutex); list_del(&mapping->list); spin_lock(&vm->it_lock); interval_tree_remove(&mapping->it, &vm->va); spin_unlock(&vm->it_lock); trace_amdgpu_vm_bo_unmap(bo_va, mapping); - if (valid) + if (valid) { + spin_lock(&vm->freed_lock); list_add(&mapping->list, &vm->freed); - else + spin_unlock(&vm->freed_lock); + } else { kfree(mapping); + } return 0; } @@ -1181,7 +1198,9 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, interval_tree_remove(&mapping->it, &vm->va); spin_unlock(&vm->it_lock); trace_amdgpu_vm_bo_unmap(bo_va, mapping); + spin_lock(&vm->freed_lock); list_add(&mapping->list, &vm->freed); + spin_unlock(&vm->freed_lock); } list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) { list_del(&mapping->list); @@ -1190,8 +1209,8 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, spin_unlock(&vm->it_lock); kfree(mapping); } - fence_put(bo_va->last_pt_update); + mutex_destroy(&bo_va->mutex); kfree(bo_va); } @@ -1236,13 +1255,13 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) vm->ids[i].id = 0; vm->ids[i].flushed_updates = NULL; } - mutex_init(&vm->mutex); vm->va = RB_ROOT; spin_lock_init(&vm->status_lock); INIT_LIST_HEAD(&vm->invalidated); INIT_LIST_HEAD(&vm->cleared); INIT_LIST_HEAD(&vm->freed); spin_lock_init(&vm->it_lock); + spin_lock_init(&vm->freed_lock); pd_size = amdgpu_vm_directory_size(adev); pd_entries = amdgpu_vm_num_pdes(adev); @@ -1320,7 +1339,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) fence_put(vm->ids[i].flushed_updates); } - mutex_destroy(&vm->mutex); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index cb0f7747e3dc..4dcc8fba5792 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -1250,7 +1250,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, u32 pixel_period; u32 line_time = 0; u32 latency_watermark_a = 0, latency_watermark_b = 0; - u32 tmp, wm_mask; + u32 tmp, wm_mask, lb_vblank_lead_lines = 0; if (amdgpu_crtc->base.enabled && num_heads && mode) { pixel_period = 1000000 / (u32)mode->clock; @@ -1333,6 +1333,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, (adev->mode_info.disp_priority == 2)) { DRM_DEBUG_KMS("force priority to high\n"); } + lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); } /* select wm A */ @@ -1357,6 +1358,8 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, amdgpu_crtc->line_time = line_time; amdgpu_crtc->wm_high = latency_watermark_a; amdgpu_crtc->wm_low = latency_watermark_b; + /* Save number of lines the linebuffer leads before the scanout */ + amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 5af3721851d6..8f1e51128b33 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ 
b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -1238,7 +1238,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, u32 pixel_period; u32 line_time = 0; u32 latency_watermark_a = 0, latency_watermark_b = 0; - u32 tmp, wm_mask; + u32 tmp, wm_mask, lb_vblank_lead_lines = 0; if (amdgpu_crtc->base.enabled && num_heads && mode) { pixel_period = 1000000 / (u32)mode->clock; @@ -1321,6 +1321,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, (adev->mode_info.disp_priority == 2)) { DRM_DEBUG_KMS("force priority to high\n"); } + lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); } /* select wm A */ @@ -1345,6 +1346,8 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, amdgpu_crtc->line_time = line_time; amdgpu_crtc->wm_high = latency_watermark_a; amdgpu_crtc->wm_low = latency_watermark_b; + /* Save number of lines the linebuffer leads before the scanout */ + amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 4f7b49a6dc50..42d954dc436d 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -1193,7 +1193,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, u32 pixel_period; u32 line_time = 0; u32 latency_watermark_a = 0, latency_watermark_b = 0; - u32 tmp, wm_mask; + u32 tmp, wm_mask, lb_vblank_lead_lines = 0; if (amdgpu_crtc->base.enabled && num_heads && mode) { pixel_period = 1000000 / (u32)mode->clock; @@ -1276,6 +1276,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, (adev->mode_info.disp_priority == 2)) { DRM_DEBUG_KMS("force priority to high\n"); } + lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); } /* select wm A */ @@ -1302,6 +1303,8 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, amdgpu_crtc->line_time = line_time; amdgpu_crtc->wm_high = latency_watermark_a; amdgpu_crtc->wm_low = latency_watermark_b; + /* Save number of lines the linebuffer leads before the scanout */ + amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 7427d8cd4c43..ed8abb58a785 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -513,7 +513,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) WREG32(mmVM_L2_CNTL3, tmp); /* setup context0 */ WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); - WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1); + WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12); WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, (u32)(adev->dummy_page.addr >> 12)); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index cb0e50ebb528..d39028440814 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -657,7 +657,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) WREG32(mmVM_L2_CNTL4, tmp); /* setup context0 */ WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); - WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1); + WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12); WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); 
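/*
 * The VM_CONTEXT0_PAGE_TABLE_END_ADDR changes above (gmc_v7_0 and
 * gmc_v8_0) drop a double subtraction: assuming adev->mc.gtt_end
 * already holds the inclusive last byte of the GTT aperture, shifting
 * it by 12 already yields the inclusive last 4 KiB page, so the old
 * "(gtt_end >> 12) - 1" left the final GTT page outside VM context 0.
 * A sketch of the arithmetic (gart_last_page() is a hypothetical
 * helper, not kernel API):
 */
static inline u32 gart_last_page(u64 gtt_start, u64 gtt_size)
{
	u64 gtt_end = gtt_start + gtt_size - 1;	/* inclusive last byte */

	return (u32)(gtt_end >> 12);		/* inclusive last page */
}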
WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, (u32)(adev->dummy_page.addr >> 12)); diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 6a52db6ad8d7..370c6c9d81c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -40,6 +40,9 @@ #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 +#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616 +#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617 +#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618 #define VCE_V3_0_FW_SIZE (384 * 1024) #define VCE_V3_0_STACK_SIZE (64 * 1024) @@ -130,9 +133,11 @@ static int vce_v3_0_start(struct amdgpu_device *adev) /* set BUSY flag */ WREG32_P(mmVCE_STATUS, 1, ~1); - - WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, - ~VCE_VCPU_CNTL__CLK_EN_MASK); + if (adev->asic_type >= CHIP_STONEY) + WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001); + else + WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, + ~VCE_VCPU_CNTL__CLK_EN_MASK); WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, @@ -391,8 +396,12 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx) WREG32(mmVCE_LMI_SWAP_CNTL, 0); WREG32(mmVCE_LMI_SWAP_CNTL1, 0); WREG32(mmVCE_LMI_VM_CTRL, 0); - - WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8)); + if (adev->asic_type >= CHIP_STONEY) { + WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8)); + WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8)); + WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8)); + } else + WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8)); offset = AMDGPU_VCE_FIRMWARE_OFFSET; size = VCE_V3_0_FW_SIZE; WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff); @@ -576,6 +585,11 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { DRM_DEBUG("IH: VCE\n"); + + WREG32_P(mmVCE_SYS_INT_STATUS, + VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK, + ~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK); + switch (entry->src_data) { case 0: amdgpu_fence_process(&adev->vce.ring[0]); diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index ea30d6ad4c13..3a4820e863ec 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -30,8 +30,7 @@ #define CREATE_TRACE_POINTS #include "gpu_sched_trace.h" -static struct amd_sched_job * -amd_sched_entity_pop_job(struct amd_sched_entity *entity); +static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity); static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); struct kmem_cache *sched_fence_slab; @@ -64,36 +63,36 @@ static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq, } /** - * Select next job from a specified run queue with round robin policy. - * Return NULL if nothing available. + * Select an entity which could provide a job to run + * + * @rq The run queue to check. + * + * Try to find a ready entity, returns NULL if none found. 
*/ -static struct amd_sched_job * -amd_sched_rq_select_job(struct amd_sched_rq *rq) +static struct amd_sched_entity * +amd_sched_rq_select_entity(struct amd_sched_rq *rq) { struct amd_sched_entity *entity; - struct amd_sched_job *sched_job; spin_lock(&rq->lock); entity = rq->current_entity; if (entity) { list_for_each_entry_continue(entity, &rq->entities, list) { - sched_job = amd_sched_entity_pop_job(entity); - if (sched_job) { + if (amd_sched_entity_is_ready(entity)) { rq->current_entity = entity; spin_unlock(&rq->lock); - return sched_job; + return entity; } } } list_for_each_entry(entity, &rq->entities, list) { - sched_job = amd_sched_entity_pop_job(entity); - if (sched_job) { + if (amd_sched_entity_is_ready(entity)) { rq->current_entity = entity; spin_unlock(&rq->lock); - return sched_job; + return entity; } if (entity == rq->current_entity) @@ -177,6 +176,24 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity) } /** + * Check if entity is ready + * + * @entity The pointer to a valid scheduler entity + * + * Return true if entity could provide a job. + */ +static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity) +{ + if (kfifo_is_empty(&entity->job_queue)) + return false; + + if (ACCESS_ONCE(entity->dependency)) + return false; + + return true; +} + +/** * Destroy a context entity * * @sched Pointer to scheduler instance @@ -211,32 +228,53 @@ static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb) amd_sched_wakeup(entity->sched); } +static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity) +{ + struct amd_gpu_scheduler *sched = entity->sched; + struct fence * fence = entity->dependency; + struct amd_sched_fence *s_fence; + + if (fence->context == entity->fence_context) { + /* We can ignore fences from ourself */ + fence_put(entity->dependency); + return false; + } + + s_fence = to_amd_sched_fence(fence); + if (s_fence && s_fence->sched == sched) { + /* Fence is from the same scheduler */ + if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) { + /* Ignore it when it is already scheduled */ + fence_put(entity->dependency); + return false; + } + + /* Wait for fence to be scheduled */ + entity->cb.func = amd_sched_entity_wakeup; + list_add_tail(&entity->cb.node, &s_fence->scheduled_cb); + return true; + } + + if (!fence_add_callback(entity->dependency, &entity->cb, + amd_sched_entity_wakeup)) + return true; + + fence_put(entity->dependency); + return false; +} + static struct amd_sched_job * amd_sched_entity_pop_job(struct amd_sched_entity *entity) { struct amd_gpu_scheduler *sched = entity->sched; struct amd_sched_job *sched_job; - if (ACCESS_ONCE(entity->dependency)) - return NULL; - if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job))) return NULL; - while ((entity->dependency = sched->ops->dependency(sched_job))) { - - if (entity->dependency->context == entity->fence_context) { - /* We can ignore fences from ourself */ - fence_put(entity->dependency); - continue; - } - - if (fence_add_callback(entity->dependency, &entity->cb, - amd_sched_entity_wakeup)) - fence_put(entity->dependency); - else + while ((entity->dependency = sched->ops->dependency(sched_job))) + if (amd_sched_entity_add_dependency_cb(entity)) return NULL; - } return sched_job; } @@ -250,6 +288,7 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity) */ static bool amd_sched_entity_in(struct amd_sched_job *sched_job) { + struct amd_gpu_scheduler *sched = sched_job->sched; struct amd_sched_entity *entity = 
sched_job->s_entity; bool added, first = false; @@ -264,7 +303,7 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job) /* first job wakes up scheduler */ if (first) - amd_sched_wakeup(sched_job->sched); + amd_sched_wakeup(sched); return added; } @@ -280,9 +319,9 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job) { struct amd_sched_entity *entity = sched_job->s_entity; + trace_amd_sched_job(sched_job); wait_event(entity->sched->job_scheduled, amd_sched_entity_in(sched_job)); - trace_amd_sched_job(sched_job); } /** @@ -304,22 +343,22 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched) } /** - * Select next to run + * Select next entity to process */ -static struct amd_sched_job * -amd_sched_select_job(struct amd_gpu_scheduler *sched) +static struct amd_sched_entity * +amd_sched_select_entity(struct amd_gpu_scheduler *sched) { - struct amd_sched_job *sched_job; + struct amd_sched_entity *entity; if (!amd_sched_ready(sched)) return NULL; /* Kernel run queue has higher priority than normal run queue*/ - sched_job = amd_sched_rq_select_job(&sched->kernel_rq); - if (sched_job == NULL) - sched_job = amd_sched_rq_select_job(&sched->sched_rq); + entity = amd_sched_rq_select_entity(&sched->kernel_rq); + if (entity == NULL) + entity = amd_sched_rq_select_entity(&sched->sched_rq); - return sched_job; + return entity; } static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) @@ -381,13 +420,16 @@ static int amd_sched_main(void *param) unsigned long flags; wait_event_interruptible(sched->wake_up_worker, - kthread_should_stop() || - (sched_job = amd_sched_select_job(sched))); + (entity = amd_sched_select_entity(sched)) || + kthread_should_stop()); + if (!entity) + continue; + + sched_job = amd_sched_entity_pop_job(entity); if (!sched_job) continue; - entity = sched_job->s_entity; s_fence = sched_job->s_fence; if (sched->timeout != MAX_SCHEDULE_TIMEOUT) { @@ -400,6 +442,7 @@ static int amd_sched_main(void *param) atomic_inc(&sched->hw_rq_count); fence = sched->ops->run_job(sched_job); + amd_sched_fence_scheduled(s_fence); if (fence) { r = fence_add_callback(fence, &s_fence->cb, amd_sched_process_job); diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index 939692b14f4b..a0f0ae53aacd 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h @@ -27,6 +27,8 @@ #include <linux/kfifo.h> #include <linux/fence.h> +#define AMD_SCHED_FENCE_SCHEDULED_BIT FENCE_FLAG_USER_BITS + struct amd_gpu_scheduler; struct amd_sched_rq; @@ -68,6 +70,7 @@ struct amd_sched_rq { struct amd_sched_fence { struct fence base; struct fence_cb cb; + struct list_head scheduled_cb; struct amd_gpu_scheduler *sched; spinlock_t lock; void *owner; @@ -134,7 +137,7 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job); struct amd_sched_fence *amd_sched_fence_create( struct amd_sched_entity *s_entity, void *owner); +void amd_sched_fence_scheduled(struct amd_sched_fence *fence); void amd_sched_fence_signal(struct amd_sched_fence *fence); - #endif diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c index 8d2130b9ff05..87c78eecea64 100644 --- a/drivers/gpu/drm/amd/scheduler/sched_fence.c +++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c @@ -35,6 +35,8 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL); if (fence == NULL) return NULL; 
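/*
 * scheduled_cb, initialized just below, backs the two-stage fence model
 * introduced in this series: a scheduler fence now fires twice, once
 * when its job is actually handed to the hardware ring (signalled via
 * amd_sched_fence_scheduled() and recorded with
 * AMD_SCHED_FENCE_SCHEDULED_BIT) and once when the job completes. An
 * entity whose dependency is a fence from its own scheduler only needs
 * the first stage, so amd_sched_entity_add_dependency_cb() parks its
 * wakeup callback on this list instead of the regular fence callback
 * machinery.
 */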
+ + INIT_LIST_HEAD(&fence->scheduled_cb); fence->owner = owner; fence->sched = s_entity->sched; spin_lock_init(&fence->lock); @@ -55,6 +57,17 @@ void amd_sched_fence_signal(struct amd_sched_fence *fence) FENCE_TRACE(&fence->base, "was already signaled\n"); } +void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence) +{ + struct fence_cb *cur, *tmp; + + set_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &s_fence->base.flags); + list_for_each_entry_safe(cur, tmp, &s_fence->scheduled_cb, node) { + list_del_init(&cur->node); + cur->func(&s_fence->base, cur); + } +} + static const char *amd_sched_fence_get_driver_name(struct fence *fence) { return "amd_sched"; diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c index 1c90969def3e..5fa4bf20b232 100644 --- a/drivers/gpu/drm/armada/armada_fb.c +++ b/drivers/gpu/drm/armada/armada_fb.c @@ -35,7 +35,7 @@ static const struct drm_framebuffer_funcs armada_fb_funcs = { }; struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev, - struct drm_mode_fb_cmd2 *mode, struct armada_gem_object *obj) + const struct drm_mode_fb_cmd2 *mode, struct armada_gem_object *obj) { struct armada_framebuffer *dfb; uint8_t format, config; @@ -101,7 +101,7 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev, } static struct drm_framebuffer *armada_fb_create(struct drm_device *dev, - struct drm_file *dfile, struct drm_mode_fb_cmd2 *mode) + struct drm_file *dfile, const struct drm_mode_fb_cmd2 *mode) { struct armada_gem_object *obj; struct armada_framebuffer *dfb; diff --git a/drivers/gpu/drm/armada/armada_fb.h b/drivers/gpu/drm/armada/armada_fb.h index ce3f12ebfc53..48073c4f54d8 100644 --- a/drivers/gpu/drm/armada/armada_fb.h +++ b/drivers/gpu/drm/armada/armada_fb.h @@ -19,6 +19,6 @@ struct armada_framebuffer { #define drm_fb_obj(fb) drm_fb_to_armada_fb(fb)->obj struct armada_framebuffer *armada_framebuffer_create(struct drm_device *, - struct drm_mode_fb_cmd2 *, struct armada_gem_object *); + const struct drm_mode_fb_cmd2 *, struct armada_gem_object *); #endif diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 05f6522c0457..eb5715994ac2 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -256,7 +256,6 @@ struct ast_framebuffer { struct ast_fbdev { struct drm_fb_helper helper; struct ast_framebuffer afb; - struct list_head fbdev_list; void *sysram; int size; struct ttm_bo_kmap_obj mapping; @@ -309,7 +308,7 @@ extern void ast_mode_fini(struct drm_device *dev); int ast_framebuffer_init(struct drm_device *dev, struct ast_framebuffer *ast_fb, - struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj); int ast_fbdev_init(struct drm_device *dev); diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c index a37e7ea4a00c..5320f8c57884 100644 --- a/drivers/gpu/drm/ast/ast_fb.c +++ b/drivers/gpu/drm/ast/ast_fb.c @@ -163,7 +163,7 @@ static struct fb_ops astfb_ops = { }; static int astfb_create_object(struct ast_fbdev *afbdev, - struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **gobj_p) { struct drm_device *dev = afbdev->helper.dev; diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 541a610667ad..9759009d1da3 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -309,7 +309,7 @@ static const struct drm_framebuffer_funcs ast_fb_funcs = { int ast_framebuffer_init(struct drm_device 
*dev, struct ast_framebuffer *ast_fb, - struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj) { int ret; @@ -327,7 +327,7 @@ int ast_framebuffer_init(struct drm_device *dev, static struct drm_framebuffer * ast_user_framebuffer_create(struct drm_device *dev, struct drm_file *filp, - struct drm_mode_fb_cmd2 *mode_cmd) + const struct drm_mode_fb_cmd2 *mode_cmd) { struct drm_gem_object *obj; struct ast_framebuffer *ast_fb; diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index 244df0a440b7..816895447155 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -402,7 +402,7 @@ static irqreturn_t atmel_hlcdc_dc_irq_handler(int irq, void *data) } static struct drm_framebuffer *atmel_hlcdc_fb_create(struct drm_device *dev, - struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd) + struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd) { return drm_fb_cma_create(dev, file_priv, mode_cmd); } diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h index 71f2687fc3cc..19b5adaebe24 100644 --- a/drivers/gpu/drm/bochs/bochs.h +++ b/drivers/gpu/drm/bochs/bochs.h @@ -149,7 +149,7 @@ int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev, int bochs_framebuffer_init(struct drm_device *dev, struct bochs_framebuffer *gfb, - struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj); int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr); int bochs_bo_unpin(struct bochs_bo *bo); diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c index 09a0637aab3e..7520bf81fc25 100644 --- a/drivers/gpu/drm/bochs/bochs_fbdev.c +++ b/drivers/gpu/drm/bochs/bochs_fbdev.c @@ -34,7 +34,7 @@ static struct fb_ops bochsfb_ops = { }; static int bochsfb_create_object(struct bochs_device *bochs, - struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **gobj_p) { struct drm_device *dev = bochs->dev; diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c index f69e6bf9bb0e..d812ad014da5 100644 --- a/drivers/gpu/drm/bochs/bochs_mm.c +++ b/drivers/gpu/drm/bochs/bochs_mm.c @@ -484,7 +484,7 @@ static const struct drm_framebuffer_funcs bochs_fb_funcs = { int bochs_framebuffer_init(struct drm_device *dev, struct bochs_framebuffer *gfb, - struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj) { int ret; @@ -502,7 +502,7 @@ int bochs_framebuffer_init(struct drm_device *dev, static struct drm_framebuffer * bochs_user_framebuffer_create(struct drm_device *dev, struct drm_file *filp, - struct drm_mode_fb_cmd2 *mode_cmd) + const struct drm_mode_fb_cmd2 *mode_cmd) { struct drm_gem_object *obj; struct bochs_framebuffer *bochs_fb; diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h index 705061537a27..b774d637a00f 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.h +++ b/drivers/gpu/drm/cirrus/cirrus_drv.h @@ -153,7 +153,6 @@ struct cirrus_device { struct cirrus_fbdev { struct drm_fb_helper helper; struct cirrus_framebuffer gfb; - struct list_head fbdev_list; void *sysram; int size; int x1, y1, x2, y2; /* dirty rect */ @@ -207,7 +206,7 @@ int cirrus_dumb_create(struct drm_file *file, int cirrus_framebuffer_init(struct drm_device *dev, struct cirrus_framebuffer *gfb, - struct drm_mode_fb_cmd2 
*mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj); bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height, diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c index 589103bcc06c..3b5be7272357 100644 --- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c +++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c @@ -135,7 +135,7 @@ static struct fb_ops cirrusfb_ops = { }; static int cirrusfb_create_object(struct cirrus_fbdev *afbdev, - struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **gobj_p) { struct drm_device *dev = afbdev->helper.dev; diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c index 055fd86ba717..0907715e90fd 100644 --- a/drivers/gpu/drm/cirrus/cirrus_main.c +++ b/drivers/gpu/drm/cirrus/cirrus_main.c @@ -29,7 +29,7 @@ static const struct drm_framebuffer_funcs cirrus_fb_funcs = { int cirrus_framebuffer_init(struct drm_device *dev, struct cirrus_framebuffer *gfb, - struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj) { int ret; @@ -47,7 +47,7 @@ int cirrus_framebuffer_init(struct drm_device *dev, static struct drm_framebuffer * cirrus_user_framebuffer_create(struct drm_device *dev, struct drm_file *filp, - struct drm_mode_fb_cmd2 *mode_cmd) + const struct drm_mode_fb_cmd2 *mode_cmd) { struct cirrus_device *cdev = dev->dev_private; struct drm_gem_object *obj; diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index aeee083c7f95..ef5f7663a718 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -316,8 +316,7 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state, if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0) return 0; - if (state->mode_blob) - drm_property_unreference_blob(state->mode_blob); + drm_property_unreference_blob(state->mode_blob); state->mode_blob = NULL; if (mode) { @@ -363,8 +362,7 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, if (blob == state->mode_blob) return 0; - if (state->mode_blob) - drm_property_unreference_blob(state->mode_blob); + drm_property_unreference_blob(state->mode_blob); state->mode_blob = NULL; if (blob) { @@ -419,8 +417,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc, struct drm_property_blob *mode = drm_property_lookup_blob(dev, val); ret = drm_atomic_set_mode_prop_for_crtc(state, mode); - if (mode) - drm_property_unreference_blob(mode); + drm_property_unreference_blob(mode); return ret; } else if (crtc->funcs->atomic_set_property) @@ -1191,12 +1188,7 @@ void drm_atomic_legacy_backoff(struct drm_atomic_state *state) retry: drm_modeset_backoff(state->acquire_ctx); - ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex, - state->acquire_ctx); - if (ret) - goto retry; - ret = drm_modeset_lock_all_crtcs(state->dev, - state->acquire_ctx); + ret = drm_modeset_lock_all_ctx(state->dev, state->acquire_ctx); if (ret) goto retry; } @@ -1433,7 +1425,7 @@ static int atomic_set_prop(struct drm_atomic_state *state, } /** - * drm_atomic_update_old_fb -- Unset old_fb pointers and set plane->fb pointers. + * drm_atomic_clean_old_fb -- Unset old_fb pointers and set plane->fb pointers. * * @dev: drm device to check. * @plane_mask: plane mask for planes that were updated. 
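The drm_atomic.c hunks above lean on drm_property_unreference_blob() accepting a NULL pointer, which is what lets the "if (blob) unreference(blob)" guards collapse into bare calls. A self-contained sketch of that NULL-tolerant refcounting style (the sketch_ names are illustrative, not the DRM API):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/types.h>

struct sketch_blob {
	struct kref refcount;
	size_t length;
	u8 data[];
};

static void sketch_blob_free(struct kref *kref)
{
	kfree(container_of(kref, struct sketch_blob, refcount));
}

/* Tolerating NULL here removes an "if (blob)" guard from every caller. */
static void sketch_unref_blob(struct sketch_blob *blob)
{
	if (!blob)
		return;

	kref_put(&blob->refcount, sketch_blob_free);
}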
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index e5aec45bf985..74a5fc4deef6 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -80,6 +80,27 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state, } } +static bool +check_pending_encoder_assignment(struct drm_atomic_state *state, + struct drm_encoder *new_encoder, + struct drm_connector *new_connector) +{ + struct drm_connector *connector; + struct drm_connector_state *conn_state; + int i; + + for_each_connector_in_state(state, connector, conn_state, i) { + if (conn_state->best_encoder != new_encoder) + continue; + + /* encoder already assigned and we're trying to re-steal it! */ + if (connector->state->best_encoder != conn_state->best_encoder) + return false; + } + + return true; +} + static struct drm_crtc * get_current_crtc_for_encoder(struct drm_device *dev, struct drm_encoder *encoder) @@ -229,6 +250,13 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) return 0; } + if (!check_pending_encoder_assignment(state, new_encoder, connector)) { + DRM_DEBUG_ATOMIC("Encoder for [CONNECTOR:%d:%s] already assigned\n", + connector->base.id, + connector->name); + return -EINVAL; + } + encoder_crtc = get_current_crtc_for_encoder(state->dev, new_encoder); @@ -1342,6 +1370,49 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state) EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc); /** + * drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes + * @crtc: CRTC + * @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks + * + * Disables all planes associated with the given CRTC. This can be + * used for instance in the CRTC helper disable callback to disable + * all planes before shutting down the display pipeline. + * + * If the atomic-parameter is set the function calls the CRTC's + * atomic_begin hook before and atomic_flush hook after disabling the + * planes. + * + * It is a bug to call this function without having implemented the + * ->atomic_disable() plane hook. 
+ */ +void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc, + bool atomic) +{ + const struct drm_crtc_helper_funcs *crtc_funcs = + crtc->helper_private; + struct drm_plane *plane; + + if (atomic && crtc_funcs && crtc_funcs->atomic_begin) + crtc_funcs->atomic_begin(crtc, NULL); + + drm_for_each_plane(plane, crtc->dev) { + const struct drm_plane_helper_funcs *plane_funcs = + plane->helper_private; + + if (plane->state->crtc != crtc || !plane_funcs) + continue; + + WARN_ON(!plane_funcs->atomic_disable); + if (plane_funcs->atomic_disable) + plane_funcs->atomic_disable(plane, NULL); + } + + if (atomic && crtc_funcs && crtc_funcs->atomic_flush) + crtc_funcs->atomic_flush(crtc, NULL); +} +EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc); + +/** * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit * @dev: DRM device * @old_state: atomic state object with old state structures @@ -1485,12 +1556,12 @@ retry: drm_atomic_set_fb_for_plane(plane_state, fb); plane_state->crtc_x = crtc_x; plane_state->crtc_y = crtc_y; - plane_state->crtc_h = crtc_h; plane_state->crtc_w = crtc_w; + plane_state->crtc_h = crtc_h; plane_state->src_x = src_x; plane_state->src_y = src_y; - plane_state->src_h = src_h; plane_state->src_w = src_w; + plane_state->src_h = src_h; if (plane == crtc->cursor) state->legacy_cursor_update = true; @@ -1609,12 +1680,12 @@ int __drm_atomic_helper_disable_plane(struct drm_plane *plane, drm_atomic_set_fb_for_plane(plane_state, NULL); plane_state->crtc_x = 0; plane_state->crtc_y = 0; - plane_state->crtc_h = 0; plane_state->crtc_w = 0; + plane_state->crtc_h = 0; plane_state->src_x = 0; plane_state->src_y = 0; - plane_state->src_h = 0; plane_state->src_w = 0; + plane_state->src_h = 0; return 0; } @@ -1797,16 +1868,16 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set, drm_atomic_set_fb_for_plane(primary_state, set->fb); primary_state->crtc_x = 0; primary_state->crtc_y = 0; - primary_state->crtc_h = vdisplay; primary_state->crtc_w = hdisplay; + primary_state->crtc_h = vdisplay; primary_state->src_x = set->x << 16; primary_state->src_y = set->y << 16; if (primary_state->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) { - primary_state->src_h = hdisplay << 16; primary_state->src_w = vdisplay << 16; + primary_state->src_h = hdisplay << 16; } else { - primary_state->src_h = vdisplay << 16; primary_state->src_w = hdisplay << 16; + primary_state->src_h = vdisplay << 16; } commit: @@ -1818,6 +1889,161 @@ commit: } /** + * drm_atomic_helper_disable_all - disable all currently active outputs + * @dev: DRM device + * @ctx: lock acquisition context + * + * Loops through all connectors, finding those that aren't turned off and then + * turns them off by setting their DPMS mode to OFF and deactivating the CRTC + * that they are connected to. + * + * This is used for example in suspend/resume to disable all currently active + * functions when suspending. + * + * Note that if callers haven't already acquired all modeset locks this might + * return -EDEADLK, which must be handled by calling drm_modeset_backoff(). + * + * Returns: + * 0 on success or a negative error code on failure. 
+ * + * See also: + * drm_atomic_helper_suspend(), drm_atomic_helper_resume() + */ +int drm_atomic_helper_disable_all(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx) +{ + struct drm_atomic_state *state; + struct drm_connector *conn; + int err; + + state = drm_atomic_state_alloc(dev); + if (!state) + return -ENOMEM; + + state->acquire_ctx = ctx; + + drm_for_each_connector(conn, dev) { + struct drm_crtc *crtc = conn->state->crtc; + struct drm_crtc_state *crtc_state; + + if (!crtc || conn->dpms != DRM_MODE_DPMS_ON) + continue; + + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) { + err = PTR_ERR(crtc_state); + goto free; + } + + crtc_state->active = false; + } + + err = drm_atomic_commit(state); + +free: + if (err < 0) + drm_atomic_state_free(state); + + return err; +} +EXPORT_SYMBOL(drm_atomic_helper_disable_all); + +/** + * drm_atomic_helper_suspend - subsystem-level suspend helper + * @dev: DRM device + * + * Duplicates the current atomic state, disables all active outputs and then + * returns a pointer to the original atomic state to the caller. Drivers can + * pass this pointer to the drm_atomic_helper_resume() helper upon resume to + * restore the output configuration that was active at the time the system + * entered suspend. + * + * Note that it is potentially unsafe to use this. The atomic state object + * returned by this function is assumed to be persistent. Drivers must ensure + * that this holds true. Before calling this function, drivers must make sure + * to suspend fbdev emulation so that nothing can be using the device. + * + * Returns: + * A pointer to a copy of the state before suspend on success or an ERR_PTR()- + * encoded error code on failure. Drivers should store the returned atomic + * state object and pass it to the drm_atomic_helper_resume() helper upon + * resume. + * + * See also: + * drm_atomic_helper_duplicate_state(), drm_atomic_helper_disable_all(), + * drm_atomic_helper_resume() + */ +struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev) +{ + struct drm_modeset_acquire_ctx ctx; + struct drm_atomic_state *state; + int err; + + drm_modeset_acquire_init(&ctx, 0); + +retry: + err = drm_modeset_lock_all_ctx(dev, &ctx); + if (err < 0) { + state = ERR_PTR(err); + goto unlock; + } + + state = drm_atomic_helper_duplicate_state(dev, &ctx); + if (IS_ERR(state)) + goto unlock; + + err = drm_atomic_helper_disable_all(dev, &ctx); + if (err < 0) { + drm_atomic_state_free(state); + state = ERR_PTR(err); + goto unlock; + } + +unlock: + if (PTR_ERR(state) == -EDEADLK) { + drm_modeset_backoff(&ctx); + goto retry; + } + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + return state; +} +EXPORT_SYMBOL(drm_atomic_helper_suspend); + +/** + * drm_atomic_helper_resume - subsystem-level resume helper + * @dev: DRM device + * @state: atomic state to resume to + * + * Calls drm_mode_config_reset() to synchronize hardware and software states, + * grabs all modeset locks and commits the atomic state object. This can be + * used in conjunction with the drm_atomic_helper_suspend() helper to + * implement suspend/resume for drivers that support atomic mode-setting. + * + * Returns: + * 0 on success or a negative error code on failure. 
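 *
 * A hedged sketch of the intended pairing in a driver's PM callbacks;
 * the foo_* names, the pm_state field and the fbdev handling details
 * are illustrative assumptions, not part of this series:
 *
 *	static int foo_pm_suspend(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		/* quiesce fbdev first, as required above */
 *		drm_fb_helper_set_suspend(&foo->fbdev->helper, 1);
 *		foo->pm_state = drm_atomic_helper_suspend(foo->drm);
 *		if (IS_ERR(foo->pm_state))
 *			return PTR_ERR(foo->pm_state);
 *		return 0;
 *	}
 *
 *	static int foo_pm_resume(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *		int ret;
 *
 *		ret = drm_atomic_helper_resume(foo->drm, foo->pm_state);
 *		drm_fb_helper_set_suspend(&foo->fbdev->helper, 0);
 *		return ret;
 *	}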
+ * + * See also: + * drm_atomic_helper_suspend() + */ +int drm_atomic_helper_resume(struct drm_device *dev, + struct drm_atomic_state *state) +{ + struct drm_mode_config *config = &dev->mode_config; + int err; + + drm_mode_config_reset(dev); + drm_modeset_lock_all(dev); + state->acquire_ctx = config->acquire_ctx; + err = drm_atomic_commit(state); + drm_modeset_unlock_all(dev); + + return err; +} +EXPORT_SYMBOL(drm_atomic_helper_resume); + +/** * drm_atomic_helper_crtc_set_property - helper for crtc properties * @crtc: DRM crtc * @property: DRM property @@ -2184,7 +2410,7 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_dpms); */ void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc) { - if (crtc->state && crtc->state->mode_blob) + if (crtc->state) drm_property_unreference_blob(crtc->state->mode_blob); kfree(crtc->state); crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL); @@ -2252,8 +2478,7 @@ EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state); void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state) { - if (state->mode_blob) - drm_property_unreference_blob(state->mode_blob); + drm_property_unreference_blob(state->mode_blob); } EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state); @@ -2430,7 +2655,9 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state); * @ctx: lock acquisition context * * Makes a copy of the current atomic state by looping over all objects and - * duplicating their respective states. + * duplicating their respective states. This is used for example by suspend/ + * resume support code to save the state prior to suspend such that it can + * be restored upon resume. * * Note that this treats atomic state as persistent between save and restore. * Drivers must make sure that this is possible and won't result in confusion @@ -2442,6 +2669,9 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state); * Returns: * A pointer to the copy of the atomic state object on success or an * ERR_PTR()-encoded error code on failure. + * + * See also: + * drm_atomic_helper_suspend(), drm_atomic_helper_resume() */ struct drm_atomic_state * drm_atomic_helper_duplicate_state(struct drm_device *dev, diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 24c5434abd1c..32dd134700bd 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -45,7 +45,7 @@ static struct drm_framebuffer * internal_framebuffer_create(struct drm_device *dev, - struct drm_mode_fb_cmd2 *r, + const struct drm_mode_fb_cmd2 *r, struct drm_file *file_priv); /* Avoid boilerplate. I'm tired of typing. */ @@ -3235,7 +3235,7 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r) static struct drm_framebuffer * internal_framebuffer_create(struct drm_device *dev, - struct drm_mode_fb_cmd2 *r, + const struct drm_mode_fb_cmd2 *r, struct drm_file *file_priv) { struct drm_mode_config *config = &dev->mode_config; diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index ef534758a02c..10d0989db273 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -818,7 +818,7 @@ EXPORT_SYMBOL(drm_helper_connector_dpms); * metadata fields. 
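 *
 * Since the metadata is only read here, the mode_cmd is now passed as a
 * const pointer; the same constification runs through every
 * framebuffer-creation path touched by this series (cirrus, exynos,
 * gma500, the CMA helpers, the core), down to the driver callback,
 * whose prototype (changed outside this excerpt) presumably becomes:
 *
 *	struct drm_framebuffer *
 *	(*fb_create)(struct drm_device *dev, struct drm_file *file_priv,
 *		     const struct drm_mode_fb_cmd2 *mode_cmd);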
*/ void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, - struct drm_mode_fb_cmd2 *mode_cmd) + const struct drm_mode_fb_cmd2 *mode_cmd) { int i; @@ -855,6 +855,12 @@ EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct); * due to slight differences in allocating shared resources when the * configuration is restored in a different order than when userspace set it up) * need to use their own restore logic. + * + * This function is deprecated. New drivers should implement atomic mode- + * setting and use the atomic suspend/resume helpers. + * + * See also: + * drm_atomic_helper_suspend(), drm_atomic_helper_resume() */ void drm_helper_resume_force_mode(struct drm_device *dev) { diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 9362609df38a..7dd6728dd092 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -160,6 +160,11 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data, goto out_unlock; } + if (!file_priv->allowed_master) { + ret = drm_new_set_master(dev, file_priv); + goto out_unlock; + } + file_priv->minor->master = drm_master_get(file_priv->master); file_priv->is_master = 1; if (dev->driver->master_set) { diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index d5d2c03fd136..c214f1246cb4 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -2545,6 +2545,33 @@ cea_mode_alternate_clock(const struct drm_display_mode *cea_mode) return clock; } +static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_match, + unsigned int clock_tolerance) +{ + u8 mode; + + if (!to_match->clock) + return 0; + + for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) { + const struct drm_display_mode *cea_mode = &edid_cea_modes[mode]; + unsigned int clock1, clock2; + + /* Check both 60Hz and 59.94Hz */ + clock1 = cea_mode->clock; + clock2 = cea_mode_alternate_clock(cea_mode); + + if (abs(to_match->clock - clock1) > clock_tolerance && + abs(to_match->clock - clock2) > clock_tolerance) + continue; + + if (drm_mode_equal_no_clocks(to_match, cea_mode)) + return mode + 1; + } + + return 0; +} + /** * drm_match_cea_mode - look for a CEA mode matching given mode * @to_match: display mode @@ -2609,6 +2636,33 @@ hdmi_mode_alternate_clock(const struct drm_display_mode *hdmi_mode) return cea_mode_alternate_clock(hdmi_mode); } +static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_match, + unsigned int clock_tolerance) +{ + u8 mode; + + if (!to_match->clock) + return 0; + + for (mode = 0; mode < ARRAY_SIZE(edid_4k_modes); mode++) { + const struct drm_display_mode *hdmi_mode = &edid_4k_modes[mode]; + unsigned int clock1, clock2; + + /* Make sure to also match alternate clocks */ + clock1 = hdmi_mode->clock; + clock2 = hdmi_mode_alternate_clock(hdmi_mode); + + if (abs(to_match->clock - clock1) > clock_tolerance && + abs(to_match->clock - clock2) > clock_tolerance) + continue; + + if (drm_mode_equal_no_clocks(to_match, hdmi_mode)) + return mode + 1; + } + + return 0; +} + /* * drm_match_hdmi_mode - look for a HDMI mode matching given mode * @to_match: display mode @@ -3119,14 +3173,18 @@ static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode) u8 mode_idx; const char *type; - mode_idx = drm_match_cea_mode(mode) - 1; + /* + * allow 5kHz clock difference either way to account for + * the 10kHz clock resolution limit of detailed timings. 
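 *
 * To make the 5kHz tolerance concrete, a worked example (assuming
 * cea_mode_alternate_clock() derives the 1000/1001 NTSC-rate variant,
 * as its callers here imply): 1080p60 has clock1 = 148500 kHz and a
 * 59.94 Hz alternate clock2 = 148500 * 1000 / 1001 ~= 148352 kHz. A
 * detailed timing counts in 10 kHz steps, so such a panel typically
 * reports 148350 kHz: |148350 - 148500| = 150 > 5 fails the nominal
 * clock, but |148350 - 148352| = 2 <= 5 matches the alternate one, so
 * drm_match_cea_mode_clock_tolerance(mode, 5) still identifies the
 * mode and the clock can be corrected to the exact table value.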
+ */ + mode_idx = drm_match_cea_mode_clock_tolerance(mode, 5) - 1; if (mode_idx < ARRAY_SIZE(edid_cea_modes)) { type = "CEA"; cea_mode = &edid_cea_modes[mode_idx]; clock1 = cea_mode->clock; clock2 = cea_mode_alternate_clock(cea_mode); } else { - mode_idx = drm_match_hdmi_mode(mode) - 1; + mode_idx = drm_match_hdmi_mode_clock_tolerance(mode, 5) - 1; if (mode_idx < ARRAY_SIZE(edid_4k_modes)) { type = "HDMI"; cea_mode = &edid_4k_modes[mode_idx]; diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c index c19a62561183..b7d5b848d2f8 100644 --- a/drivers/gpu/drm/drm_fb_cma_helper.c +++ b/drivers/gpu/drm/drm_fb_cma_helper.c @@ -74,7 +74,7 @@ static struct drm_framebuffer_funcs drm_fb_cma_funcs = { }; static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev, - struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_cma_object **obj, + const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_cma_object **obj, unsigned int num_planes) { struct drm_fb_cma *fb_cma; @@ -107,7 +107,7 @@ static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev, * checked before calling this function. */ struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev, - struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd) + struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd) { struct drm_fb_cma *fb_cma; struct drm_gem_cma_object *objs[4]; diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index c59ce4d0ef75..1ea8790e5090 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -126,6 +126,60 @@ static int drm_cpu_valid(void) } /** + * drm_new_set_master - Allocate a new master object and become master for the + * associated master realm. + * + * @dev: The associated device. + * @fpriv: File private identifying the client. + * + * This function must be called with dev::master_mutex held. + * Returns negative error code on failure. Zero on success. + */ +int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv) +{ + struct drm_master *old_master; + int ret; + + lockdep_assert_held_once(&dev->master_mutex); + + /* create a new master */ + fpriv->minor->master = drm_master_create(fpriv->minor); + if (!fpriv->minor->master) + return -ENOMEM; + + /* take another reference for the copy in the local file priv */ + old_master = fpriv->master; + fpriv->master = drm_master_get(fpriv->minor->master); + + if (dev->driver->master_create) { + ret = dev->driver->master_create(dev, fpriv->master); + if (ret) + goto out_err; + } + if (dev->driver->master_set) { + ret = dev->driver->master_set(dev, fpriv, true); + if (ret) + goto out_err; + } + + fpriv->is_master = 1; + fpriv->allowed_master = 1; + fpriv->authenticated = 1; + if (old_master) + drm_master_put(&old_master); + + return 0; + +out_err: + /* drop both references and restore old master on failure */ + drm_master_put(&fpriv->minor->master); + drm_master_put(&fpriv->master); + fpriv->master = old_master; + + return ret; +} + +/** * Called whenever a process opens /dev/drm. * * \param filp file pointer.
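Condensed from the drm_drv.c hunk earlier in this series, this helper is what drm_setmaster_ioctl() now calls for a client that has never been master before, instead of re-taking a reference on a possibly stale master:

	/* condensed from the drm_setmaster_ioctl() hunk above */
	if (!file_priv->allowed_master) {
		ret = drm_new_set_master(dev, file_priv);
		goto out_unlock;
	}

	/* clients that held master before just re-acquire it */
	file_priv->minor->master = drm_master_get(file_priv->master);
	file_priv->is_master = 1;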
@@ -172,6 +226,8 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor) init_waitqueue_head(&priv->event_wait); priv->event_space = 4096; /* set aside 4k for event buffer */ + mutex_init(&priv->event_read_lock); + if (drm_core_check_feature(dev, DRIVER_GEM)) drm_gem_open(dev, priv); @@ -189,35 +245,9 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor) mutex_lock(&dev->master_mutex); if (drm_is_primary_client(priv) && !priv->minor->master) { /* create a new master */ - priv->minor->master = drm_master_create(priv->minor); - if (!priv->minor->master) { - ret = -ENOMEM; + ret = drm_new_set_master(dev, priv); + if (ret) goto out_close; - } - - priv->is_master = 1; - /* take another reference for the copy in the local file priv */ - priv->master = drm_master_get(priv->minor->master); - priv->authenticated = 1; - - if (dev->driver->master_create) { - ret = dev->driver->master_create(dev, priv->master); - if (ret) { - /* drop both references if this fails */ - drm_master_put(&priv->minor->master); - drm_master_put(&priv->master); - goto out_close; - } - } - if (dev->driver->master_set) { - ret = dev->driver->master_set(dev, priv, true); - if (ret) { - /* drop both references if this fails */ - drm_master_put(&priv->minor->master); - drm_master_put(&priv->master); - goto out_close; - } - } } else if (drm_is_primary_client(priv)) { /* get a reference to the master */ priv->master = drm_master_get(priv->minor->master); @@ -483,14 +513,28 @@ ssize_t drm_read(struct file *filp, char __user *buffer, { struct drm_file *file_priv = filp->private_data; struct drm_device *dev = file_priv->minor->dev; - ssize_t ret = 0; + ssize_t ret; if (!access_ok(VERIFY_WRITE, buffer, count)) return -EFAULT; - spin_lock_irq(&dev->event_lock); + ret = mutex_lock_interruptible(&file_priv->event_read_lock); + if (ret) + return ret; + for (;;) { - if (list_empty(&file_priv->event_list)) { + struct drm_pending_event *e = NULL; + + spin_lock_irq(&dev->event_lock); + if (!list_empty(&file_priv->event_list)) { + e = list_first_entry(&file_priv->event_list, + struct drm_pending_event, link); + file_priv->event_space += e->event->length; + list_del(&e->link); + } + spin_unlock_irq(&dev->event_lock); + + if (e == NULL) { if (ret) break; @@ -499,36 +543,36 @@ ssize_t drm_read(struct file *filp, char __user *buffer, break; } - spin_unlock_irq(&dev->event_lock); + mutex_unlock(&file_priv->event_read_lock); ret = wait_event_interruptible(file_priv->event_wait, !list_empty(&file_priv->event_list)); - spin_lock_irq(&dev->event_lock); - if (ret < 0) - break; - - ret = 0; + if (ret >= 0) + ret = mutex_lock_interruptible(&file_priv->event_read_lock); + if (ret) + return ret; } else { - struct drm_pending_event *e; - - e = list_first_entry(&file_priv->event_list, - struct drm_pending_event, link); - if (e->event->length + ret > count) + unsigned length = e->event->length; + + if (length > count - ret) { +put_back_event: + spin_lock_irq(&dev->event_lock); + file_priv->event_space -= length; + list_add(&e->link, &file_priv->event_list); + spin_unlock_irq(&dev->event_lock); break; + } - if (__copy_to_user_inatomic(buffer + ret, - e->event, e->event->length)) { + if (copy_to_user(buffer + ret, e->event, length)) { if (ret == 0) ret = -EFAULT; - break; + goto put_back_event; } - file_priv->event_space += e->event->length; - ret += e->event->length; - list_del(&e->link); + ret += length; e->destroy(e); } } - spin_unlock_irq(&dev->event_lock); + mutex_unlock(&file_priv->event_read_lock); return ret; } diff 
--git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index c7de454e8e88..2e10bba4468b 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -244,8 +244,9 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj) * @filp: drm file-private structure to use for the handle look up * @handle: userspace handle to delete * - * Removes the GEM handle from the @filp lookup table and if this is the last - * handle also cleans up linked resources like GEM names. + * Removes the GEM handle from the @filp lookup table which has been added with + * drm_gem_handle_create(). If this is the last handle also cleans up linked + * resources like GEM names. */ int drm_gem_handle_delete(struct drm_file *filp, u32 handle) @@ -314,6 +315,10 @@ EXPORT_SYMBOL(drm_gem_dumb_destroy); * This expects the dev->object_name_lock to be held already and will drop it * before returning. Used to avoid races in establishing new handles when * importing an object from either a flink name or a dma-buf. + * + * Handles must be released again through drm_gem_handle_delete(). This is done + * when userspace closes @file_priv for all attached handles, or through the + * GEM_CLOSE ioctl for individual handles. */ int drm_gem_handle_create_tail(struct drm_file *file_priv, @@ -541,7 +546,17 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, } EXPORT_SYMBOL(drm_gem_put_pages); -/** Returns a reference to the object named by the handle. */ +/** + * drm_gem_object_lookup - look up a GEM object from its handle + * @dev: DRM device + * @filp: DRM file private data + * @handle: userspace handle + * + * Returns: + * + * A reference to the object named by the handle if such exists on @filp, NULL + * otherwise. + */ struct drm_gem_object * drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp, u32 handle) @@ -774,6 +789,13 @@ drm_gem_object_free(struct kref *kref) } EXPORT_SYMBOL(drm_gem_object_free); +/** + * drm_gem_vm_open - vma->ops->open implementation for GEM + * @vma: VM area structure + * + * This function implements the #vm_operations_struct open() callback for GEM + * drivers. This must be used together with drm_gem_vm_close(). + */ void drm_gem_vm_open(struct vm_area_struct *vma) { struct drm_gem_object *obj = vma->vm_private_data; @@ -782,6 +804,13 @@ void drm_gem_vm_open(struct vm_area_struct *vma) EXPORT_SYMBOL(drm_gem_vm_open);
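Both helpers are intended to be wired into a driver's vm_operations_struct, so that every VMA created or duplicated for a GEM mapping holds its own reference on the backing object; a minimal sketch, with a hypothetical fault handler:

	static const struct vm_operations_struct foo_gem_vm_ops = {
		.fault = foo_gem_fault,    /* driver-specific, hypothetical */
		.open  = drm_gem_vm_open,  /* takes a GEM reference per new VMA */
		.close = drm_gem_vm_close, /* drops it when the VMA goes away */
	};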
+/** + * drm_gem_vm_close - vma->ops->close implementation for GEM + * @vma: VM area structure + * + * This function implements the #vm_operations_struct close() callback for GEM + * drivers. This must be used together with drm_gem_vm_open(). + */ void drm_gem_vm_close(struct vm_area_struct *vma) { struct drm_gem_object *obj = vma->vm_private_data; diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 2151ea551d3b..607f493ae801 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -980,7 +980,8 @@ static void send_vblank_event(struct drm_device *dev, struct drm_pending_vblank_event *e, unsigned long seq, struct timeval *now) { - WARN_ON_SMP(!spin_is_locked(&dev->event_lock)); + assert_spin_locked(&dev->event_lock); + e->event.sequence = seq; e->event.tv_sec = now->tv_sec; e->event.tv_usec = now->tv_usec; @@ -993,6 +994,57 @@ static void send_vblank_event(struct drm_device *dev, } /** + * drm_arm_vblank_event - arm vblank event after pageflip + * @dev: DRM device + * @pipe: CRTC index + * @e: the event to prepare to send + * + * A lot of drivers need to generate vblank events for the very next vblank + * interrupt: for example, the page flip interrupt fires when the page flip + * gets armed, but the flip does not actually execute until the following + * vblank period. This helper function implements exactly the required vblank + * arming behaviour. + * + * Caller must hold event lock. Caller must also hold a vblank reference for + * the event @e, which will be dropped when the next vblank arrives. + * + * This is the legacy version of drm_crtc_arm_vblank_event(). + */ +void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe, + struct drm_pending_vblank_event *e) +{ + assert_spin_locked(&dev->event_lock); + + e->pipe = pipe; + e->event.sequence = drm_vblank_count(dev, pipe); + list_add_tail(&e->base.link, &dev->vblank_event_list); +} +EXPORT_SYMBOL(drm_arm_vblank_event); + +/** + * drm_crtc_arm_vblank_event - arm vblank event after pageflip + * @crtc: the source CRTC of the vblank event + * @e: the event to send + * + * A lot of drivers need to generate vblank events for the very next vblank + * interrupt: for example, the page flip interrupt fires when the page flip + * gets armed, but the flip does not actually execute until the following + * vblank period. This helper function implements exactly the required vblank + * arming behaviour. + * + * Caller must hold event lock. Caller must also hold a vblank reference for + * the event @e, which will be dropped when the next vblank arrives. + * + * This is the native KMS version of drm_arm_vblank_event(). + */ +void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, + struct drm_pending_vblank_event *e) +{ + drm_arm_vblank_event(crtc->dev, drm_crtc_index(crtc), e); +} +EXPORT_SYMBOL(drm_crtc_arm_vblank_event); + +/** * drm_send_vblank_event - helper to send vblank event after pageflip + * @dev: DRM device + * @pipe: CRTC index diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index cd74a0953f42..ef6bd3656548 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -917,13 +917,30 @@ bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_displ } else if (mode1->clock != mode2->clock) return false; + return drm_mode_equal_no_clocks(mode1, mode2); +} +EXPORT_SYMBOL(drm_mode_equal); + +/** + * drm_mode_equal_no_clocks - test modes for equality + * @mode1: first mode + * @mode2: second mode + * + * Check to see if @mode1 and @mode2 are equivalent, but + * don't check the pixel clocks. + * + * Returns: + * True if the modes are equal, false otherwise.
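 *
 * The split gives three layers of strictness, each delegating to the
 * next: drm_mode_equal() additionally compares pixel clocks,
 * drm_mode_equal_no_clocks() additionally compares the 3D flags, and
 * drm_mode_equal_no_clocks_no_stereo() compares the raw timings only.
 * The EDID clock-tolerance matchers above deliberately use this middle
 * level, since they implement their own, fuzzier clock comparison.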
+ */ +bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2) +{ if ((mode1->flags & DRM_MODE_FLAG_3D_MASK) != (mode2->flags & DRM_MODE_FLAG_3D_MASK)) return false; return drm_mode_equal_no_clocks_no_stereo(mode1, mode2); } -EXPORT_SYMBOL(drm_mode_equal); +EXPORT_SYMBOL(drm_mode_equal_no_clocks); /** * drm_mode_equal_no_clocks_no_stereo - test modes for equality @@ -1230,7 +1247,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option, unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0; bool yres_specified = false, cvt = false, rb = false; bool interlace = false, margins = false, was_digit = false; - int i; + int i, err; enum drm_connector_force force = DRM_FORCE_UNSPECIFIED; #ifdef CONFIG_FB @@ -1250,7 +1267,9 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option, case '@': if (!refresh_specified && !bpp_specified && !yres_specified && !cvt && !rb && was_digit) { - refresh = simple_strtol(&name[i+1], NULL, 10); + err = kstrtouint(&name[i + 1], 10, &refresh); + if (err) + return false; refresh_specified = true; was_digit = false; } else @@ -1259,7 +1278,9 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option, case '-': if (!bpp_specified && !yres_specified && !cvt && !rb && was_digit) { - bpp = simple_strtol(&name[i+1], NULL, 10); + err = kstrtouint(&name[i + 1], 10, &bpp); + if (err) + return false; bpp_specified = true; was_digit = false; } else @@ -1267,7 +1288,9 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option, break; case 'x': if (!yres_specified && was_digit) { - yres = simple_strtol(&name[i+1], NULL, 10); + err = kstrtouint(&name[i + 1], 10, &yres); + if (err) + return false; yres_specified = true; was_digit = false; } else @@ -1491,4 +1514,4 @@ int drm_mode_convert_umode(struct drm_display_mode *out, out: return ret; -}
\ No newline at end of file +} diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c index 6675b1428410..c2f5971146ba 100644 --- a/drivers/gpu/drm/drm_modeset_lock.c +++ b/drivers/gpu/drm/drm_modeset_lock.c @@ -57,11 +57,18 @@ /** * drm_modeset_lock_all - take all modeset locks - * @dev: drm device + * @dev: DRM device * * This function takes all modeset locks, suitable where a more fine-grained - * scheme isn't (yet) implemented. Locks must be dropped with - * drm_modeset_unlock_all. + * scheme isn't (yet) implemented. Locks must be dropped by calling the + * drm_modeset_unlock_all() function. + * + * This function is deprecated. It allocates a lock acquisition context and + * stores it in the DRM device's ->mode_config. This facilitates conversion of + * existing code because it removes the need to manually deal with the + * acquisition context, but it is also brittle because the context is global + * and care must be taken not to nest calls. New code should use the + * drm_modeset_lock_all_ctx() function and pass in the context explicitly. */ void drm_modeset_lock_all(struct drm_device *dev) { @@ -78,39 +85,43 @@ void drm_modeset_lock_all(struct drm_device *dev) drm_modeset_acquire_init(ctx, 0); retry: - ret = drm_modeset_lock(&config->connection_mutex, ctx); - if (ret) - goto fail; - ret = drm_modeset_lock_all_crtcs(dev, ctx); - if (ret) - goto fail; + ret = drm_modeset_lock_all_ctx(dev, ctx); + if (ret < 0) { + if (ret == -EDEADLK) { + drm_modeset_backoff(ctx); + goto retry; + } + + drm_modeset_acquire_fini(ctx); + kfree(ctx); + return; + } WARN_ON(config->acquire_ctx); - /* now we hold the locks, so now that it is safe, stash the - * ctx for drm_modeset_unlock_all(): + /* + * We hold the locks now, so it is safe to stash the acquisition + * context for drm_modeset_unlock_all(). */ config->acquire_ctx = ctx; drm_warn_on_modeset_not_all_locked(dev); - - return; - -fail: - if (ret == -EDEADLK) { - drm_modeset_backoff(ctx); - goto retry; - } - - kfree(ctx); } EXPORT_SYMBOL(drm_modeset_lock_all); /** * drm_modeset_unlock_all - drop all modeset locks - * @dev: device + * @dev: DRM device * - * This function drop all modeset locks taken by drm_modeset_lock_all. + * This function drops all modeset locks taken by a previous call to the + * drm_modeset_lock_all() function. + * + * This function is deprecated. It uses the lock acquisition context stored + * in the DRM device's ->mode_config. This facilitates conversion of existing + * code because it removes the need to manually deal with the acquisition + * context, but it is also brittle because the context is global and care must + * be taken not to nest calls. New code should pass the acquisition context + * directly to the drm_modeset_drop_locks() function. */ void drm_modeset_unlock_all(struct drm_device *dev) { @@ -431,14 +442,34 @@ void drm_modeset_unlock(struct drm_modeset_lock *lock) } EXPORT_SYMBOL(drm_modeset_unlock); -/* In some legacy codepaths it's convenient to just grab all the crtc and plane - * related locks. */ -int drm_modeset_lock_all_crtcs(struct drm_device *dev, - struct drm_modeset_acquire_ctx *ctx) +/** + * drm_modeset_lock_all_ctx - take all modeset locks + * @dev: DRM device + * @ctx: lock acquisition context + * + * This function takes all modeset locks, suitable where a more fine-grained + * scheme isn't (yet) implemented. + * + * Unlike drm_modeset_lock_all(), it doesn't take the dev->mode_config.mutex + * since that lock isn't required for modeset state changes.
Callers which + * need to grab that lock too need to do so outside of the acquire context + * @ctx. + * + * Locks acquired with this function should be released by calling the + * drm_modeset_drop_locks() function on @ctx. + * + * Returns: 0 on success or a negative error-code on failure. + */ +int drm_modeset_lock_all_ctx(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx) { struct drm_crtc *crtc; struct drm_plane *plane; - int ret = 0; + int ret; + + ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx); + if (ret) + return ret; drm_for_each_crtc(crtc, dev) { ret = drm_modeset_lock(&crtc->mutex, ctx); @@ -454,4 +485,4 @@ int drm_modeset_lock_all_crtcs(struct drm_device *dev, return 0; } -EXPORT_SYMBOL(drm_modeset_lock_all_crtcs); +EXPORT_SYMBOL(drm_modeset_lock_all_ctx); diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c index d384ebcf0aaf..a6983d41920d 100644 --- a/drivers/gpu/drm/drm_plane_helper.c +++ b/drivers/gpu/drm/drm_plane_helper.c @@ -164,6 +164,8 @@ int drm_plane_helper_check_update(struct drm_plane *plane, vscale = drm_rect_calc_vscale(src, dest, min_scale, max_scale); if (hscale < 0 || vscale < 0) { DRM_DEBUG_KMS("Invalid scaling of plane\n"); + drm_rect_debug_print("src: ", src, true); + drm_rect_debug_print("dst: ", dest, false); return -ERANGE; } @@ -180,6 +182,8 @@ int drm_plane_helper_check_update(struct drm_plane *plane, if (!can_position && !drm_rect_equals(dest, clip)) { DRM_DEBUG_KMS("Plane must cover entire CRTC\n"); + drm_rect_debug_print("dst: ", dest, false); + drm_rect_debug_print("clip: ", clip, false); return -EINVAL; } diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index a18164f2f6d2..eee3b6f38cfb 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -147,6 +147,8 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect list_for_each_entry(mode, &connector->modes, head) mode->status = MODE_UNVERIFIED; + old_status = connector->status; + if (connector->force) { if (connector->force == DRM_FORCE_ON || connector->force == DRM_FORCE_ON_DIGITAL) @@ -156,33 +158,32 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect if (connector->funcs->force) connector->funcs->force(connector); } else { - old_status = connector->status; - connector->status = connector->funcs->detect(connector, true); + } + + /* + * Normally either the driver's hpd code or the poll loop should + * pick up any changes and fire the hotplug event. But if + * userspace sneaks in a probe, we might miss a change. Hence + * check here, and if anything changed start the hotplug code. + */ + if (old_status != connector->status) { + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", + connector->base.id, + connector->name, + drm_get_connector_status_name(old_status), + drm_get_connector_status_name(connector->status)); /* - * Normally either the driver's hpd code or the poll loop should - * pick up any changes and fire the hotplug event. But if - * userspace sneaks in a probe, we might miss a change. Hence - * check here, and if anything changed start the hotplug code. + * The hotplug event code might call into the fb + * helpers, and so expects that we do not hold any + * locks. Fire up the poll struct instead, it will + * disable itself again. 
*/ - if (old_status != connector->status) { - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", - connector->base.id, - connector->name, - old_status, connector->status); - - /* - * The hotplug event code might call into the fb - * helpers, and so expects that we do not hold any - * locks. Fire up the poll struct instead, it will - * disable itself again. - */ - dev->mode_config.delayed_event = true; - if (dev->mode_config.poll_enabled) - schedule_delayed_work(&dev->mode_config.output_poll_work, - 0); - } + dev->mode_config.delayed_event = true; + if (dev->mode_config.poll_enabled) + schedule_delayed_work(&dev->mode_config.output_poll_work, + 0); } /* Re-enable polling in case the global poll config changed. */ diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c index 531ac4cc9756..a8e2c8603945 100644 --- a/drivers/gpu/drm/drm_rect.c +++ b/drivers/gpu/drm/drm_rect.c @@ -275,22 +275,23 @@ EXPORT_SYMBOL(drm_rect_calc_vscale_relaxed); /** * drm_rect_debug_print - print the rectangle information + * @prefix: prefix string * @r: rectangle to print * @fixed_point: rectangle is in 16.16 fixed point format */ -void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point) +void drm_rect_debug_print(const char *prefix, const struct drm_rect *r, bool fixed_point) { int w = drm_rect_width(r); int h = drm_rect_height(r); if (fixed_point) - DRM_DEBUG_KMS("%d.%06ux%d.%06u%+d.%06u%+d.%06u\n", + DRM_DEBUG_KMS("%s%d.%06ux%d.%06u%+d.%06u%+d.%06u\n", prefix, w >> 16, ((w & 0xffff) * 15625) >> 10, h >> 16, ((h & 0xffff) * 15625) >> 10, r->x1 >> 16, ((r->x1 & 0xffff) * 15625) >> 10, r->y1 >> 16, ((r->y1 & 0xffff) * 15625) >> 10); else - DRM_DEBUG_KMS("%dx%d%+d%+d\n", w, h, r->x1, r->y1); + DRM_DEBUG_KMS("%s%dx%d%+d%+d\n", prefix, w, h, r->x1, r->y1); } EXPORT_SYMBOL(drm_rect_debug_print); diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 615b7e667320..0ca64106a97b 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -167,47 +167,35 @@ static ssize_t status_store(struct device *device, { struct drm_connector *connector = to_drm_connector(device); struct drm_device *dev = connector->dev; - enum drm_connector_status old_status; + enum drm_connector_force old_force; int ret; ret = mutex_lock_interruptible(&dev->mode_config.mutex); if (ret) return ret; - old_status = connector->status; + old_force = connector->force; - if (sysfs_streq(buf, "detect")) { + if (sysfs_streq(buf, "detect")) connector->force = 0; - connector->status = connector->funcs->detect(connector, true); - } else if (sysfs_streq(buf, "on")) { + else if (sysfs_streq(buf, "on")) connector->force = DRM_FORCE_ON; - } else if (sysfs_streq(buf, "on-digital")) { + else if (sysfs_streq(buf, "on-digital")) connector->force = DRM_FORCE_ON_DIGITAL; - } else if (sysfs_streq(buf, "off")) { + else if (sysfs_streq(buf, "off")) connector->force = DRM_FORCE_OFF; - } else + else ret = -EINVAL; - if (ret == 0 && connector->force) { - if (connector->force == DRM_FORCE_ON || - connector->force == DRM_FORCE_ON_DIGITAL) - connector->status = connector_status_connected; - else - connector->status = connector_status_disconnected; - if (connector->funcs->force) - connector->funcs->force(connector); - } - - if (old_status != connector->status) { - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", + if (old_force != connector->force || !connector->force) { + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force updated from %d to %d or reprobing\n", connector->base.id, 
connector->name, - old_status, connector->status); + old_force, connector->force); - dev->mode_config.delayed_event = true; - if (dev->mode_config.poll_enabled) - schedule_delayed_work(&dev->mode_config.output_poll_work, - 0); + connector->funcs->fill_modes(connector, + dev->mode_config.max_width, + dev->mode_config.max_height); } mutex_unlock(&dev->mode_config.mutex); @@ -256,23 +244,29 @@ static ssize_t edid_show(struct file *filp, struct kobject *kobj, struct drm_connector *connector = to_drm_connector(connector_dev); unsigned char *edid; size_t size; + ssize_t ret = 0; + mutex_lock(&connector->dev->mode_config.mutex); if (!connector->edid_blob_ptr) - return 0; + goto unlock; edid = connector->edid_blob_ptr->data; size = connector->edid_blob_ptr->length; if (!edid) - return 0; + goto unlock; if (off >= size) - return 0; + goto unlock; if (off + count > size) count = size - off; memcpy(buf, edid + off, count); - return count; + ret = count; +unlock: + mutex_unlock(&connector->dev->mode_config.mutex); + + return ret; } static ssize_t modes_show(struct device *device, @@ -283,10 +277,12 @@ static ssize_t modes_show(struct device *device, struct drm_display_mode *mode; int written = 0; + mutex_lock(&connector->dev->mode_config.mutex); list_for_each_entry(mode, &connector->modes, head) { written += snprintf(buf + written, PAGE_SIZE - written, "%s\n", mode->name); } + mutex_unlock(&connector->dev->mode_config.mutex); return written; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index fcea28bdbc42..49b9bc302e87 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -117,7 +117,7 @@ static struct drm_framebuffer_funcs exynos_drm_fb_funcs = { struct drm_framebuffer * exynos_drm_framebuffer_init(struct drm_device *dev, - struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct exynos_drm_gem **exynos_gem, int count) { @@ -154,7 +154,7 @@ err: static struct drm_framebuffer * exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, - struct drm_mode_fb_cmd2 *mode_cmd) + const struct drm_mode_fb_cmd2 *mode_cmd) { struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER]; struct drm_gem_object *obj; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h index 726a2d44371f..a8a75ac87e59 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.h +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h @@ -18,7 +18,7 @@ struct drm_framebuffer * exynos_drm_framebuffer_init(struct drm_device *dev, - struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct exynos_drm_gem **exynos_gem, int count); diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index 2eaf1b31c7bd..ee95c03a8c54 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -241,7 +241,7 @@ static struct fb_ops psbfb_unaccel_ops = { */ static int psb_framebuffer_init(struct drm_device *dev, struct psb_framebuffer *fb, - struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct gtt_range *gt) { u32 bpp, depth; @@ -284,7 +284,7 @@ static int psb_framebuffer_init(struct drm_device *dev, static struct drm_framebuffer *psb_framebuffer_create (struct drm_device *dev, - struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct gtt_range *gt) { struct psb_framebuffer *fb; @@ -406,8 +406,6 @@ static int psbfb_create(struct psb_fbdev *fbdev, 
memset(dev_priv->vram_addr + backing->offset, 0, size); - mutex_lock(&dev->struct_mutex); - info = drm_fb_helper_alloc_fbi(&fbdev->psb_fb_helper); if (IS_ERR(info)) { ret = PTR_ERR(info); @@ -463,17 +461,15 @@ static int psbfb_create(struct psb_fbdev *fbdev, dev_dbg(dev->dev, "allocated %dx%d fb\n", psbfb->base.width, psbfb->base.height); - mutex_unlock(&dev->struct_mutex); return 0; out_unref: if (backing->stolen) psb_gtt_free_range(dev, backing); else - drm_gem_object_unreference(&backing->gem); + drm_gem_object_unreference_unlocked(&backing->gem); drm_fb_helper_release_fbi(&fbdev->psb_fb_helper); out_err1: - mutex_unlock(&dev->struct_mutex); psb_gtt_free_range(dev, backing); return ret; } @@ -488,7 +484,7 @@ out_err1: */ static struct drm_framebuffer *psb_user_framebuffer_create (struct drm_device *dev, struct drm_file *filp, - struct drm_mode_fb_cmd2 *cmd) + const struct drm_mode_fb_cmd2 *cmd) { struct gtt_range *r; struct drm_gem_object *obj; @@ -569,7 +565,7 @@ static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev) drm_framebuffer_cleanup(&psbfb->base); if (psbfb->gtt) - drm_gem_object_unreference(&psbfb->gtt->gem); + drm_gem_object_unreference_unlocked(&psbfb->gtt->gem); return 0; } @@ -784,12 +780,8 @@ void psb_modeset_cleanup(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; if (dev_priv->modeset) { - mutex_lock(&dev->struct_mutex); - drm_kms_helper_poll_fini(dev); psb_fbdev_fini(dev); drm_mode_config_cleanup(dev); - - mutex_unlock(&dev->struct_mutex); } } diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c index c707fa6fca85..506224b3a0ad 100644 --- a/drivers/gpu/drm/gma500/gem.c +++ b/drivers/gpu/drm/gma500/gem.c @@ -62,15 +62,10 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev, int ret = 0; struct drm_gem_object *obj; - mutex_lock(&dev->struct_mutex); - /* GEM does all our handle to object mapping */ obj = drm_gem_object_lookup(dev, file, handle); - if (obj == NULL) { - ret = -ENOENT; - goto unlock; - } - /* What validation is needed here ? */ + if (obj == NULL) + return -ENOENT; /* Make it mmapable */ ret = drm_gem_create_mmap_offset(obj); @@ -78,9 +73,7 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev, goto out; *offset = drm_vma_node_offset_addr(&obj->vma_node); out: - drm_gem_object_unreference(obj); -unlock: - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(obj); return ret; } @@ -130,7 +123,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size, return ret; } /* We have the initial and handle reference but need only one now */ - drm_gem_object_unreference(&r->gem); + drm_gem_object_unreference_unlocked(&r->gem); *handlep = handle; return 0; } @@ -189,7 +182,7 @@ int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) /* Make sure we don't parallel update on a fault, nor move or remove something from beneath our feet */ - mutex_lock(&dev->struct_mutex); + mutex_lock(&dev_priv->mmap_mutex); /* For now the mmap pins the object and it stays pinned. 
As things stand that will do us no harm */ @@ -215,7 +208,7 @@ int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); fail: - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev_priv->mmap_mutex); switch (ret) { case 0: case -ERESTARTSYS: diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c index 001b450b27b3..ff17af4cfc64 100644 --- a/drivers/gpu/drm/gma500/gma_display.c +++ b/drivers/gpu/drm/gma500/gma_display.c @@ -349,8 +349,6 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc, /* If we didn't get a handle then turn the cursor off */ if (!handle) { temp = CURSOR_MODE_DISABLE; - mutex_lock(&dev->struct_mutex); - if (gma_power_begin(dev, false)) { REG_WRITE(control, temp); REG_WRITE(base, 0); @@ -362,11 +360,9 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc, gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem); psb_gtt_unpin(gt); - drm_gem_object_unreference(gma_crtc->cursor_obj); + drm_gem_object_unreference_unlocked(gma_crtc->cursor_obj); gma_crtc->cursor_obj = NULL; } - - mutex_unlock(&dev->struct_mutex); return 0; } @@ -376,7 +372,6 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc, return -EINVAL; } - mutex_lock(&dev->struct_mutex); obj = drm_gem_object_lookup(dev, file_priv, handle); if (!obj) { ret = -ENOENT; @@ -441,17 +436,15 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc, if (gma_crtc->cursor_obj) { gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem); psb_gtt_unpin(gt); - drm_gem_object_unreference(gma_crtc->cursor_obj); + drm_gem_object_unreference_unlocked(gma_crtc->cursor_obj); } gma_crtc->cursor_obj = obj; unlock: - mutex_unlock(&dev->struct_mutex); return ret; unref_cursor: - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(obj); return ret; } diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c index ce015db59dc6..8f69225ce2b4 100644 --- a/drivers/gpu/drm/gma500/gtt.c +++ b/drivers/gpu/drm/gma500/gtt.c @@ -425,6 +425,7 @@ int psb_gtt_init(struct drm_device *dev, int resume) if (!resume) { mutex_init(&dev_priv->gtt_mutex); + mutex_init(&dev_priv->mmap_mutex); psb_gtt_alloc(dev); } diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index e21726ecac32..3bd2c726dd61 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ b/drivers/gpu/drm/gma500/psb_drv.h @@ -465,6 +465,8 @@ struct drm_psb_private { struct mutex gtt_mutex; struct resource *gtt_mem; /* Our PCI resource */ + struct mutex mmap_mutex; + struct psb_mmu_driver *mmu; struct psb_mmu_pd *pf_pd; diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 051eab33e4c7..fcd77b27514d 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -10,6 +10,7 @@ config DRM_I915 # the shmem_readpage() which depends upon tmpfs select SHMEM select TMPFS + select STOP_MACHINE select DRM_KMS_HELPER select DRM_PANEL select DRM_MIPI_DSI diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 44d290ae1999..0851de07bd13 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -77,6 +77,7 @@ i915-y += dvo_ch7017.o \ dvo_tfp410.o \ intel_crt.o \ intel_ddi.o \ + intel_dp_link_training.o \ intel_dp_mst.o \ intel_dp.o \ intel_dsi.o \ diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h index 0e2c1b9648a7..13dea4263554 100644 --- a/drivers/gpu/drm/i915/dvo.h +++ b/drivers/gpu/drm/i915/dvo.h @@ -32,7 
+32,8 @@ struct intel_dvo_device { const char *name; int type; /* DVOA/B/C output register */ - u32 dvo_reg; + i915_reg_t dvo_reg; + i915_reg_t dvo_srcdim_reg; /* GPIO register used for i2c bus to control this device */ u32 gpio; int slave_addr; diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index db58c8d664c2..814d894ed925 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -407,14 +407,14 @@ static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = { * LRI. */ struct drm_i915_reg_descriptor { - u32 addr; + i915_reg_t addr; u32 mask; u32 value; }; /* Convenience macro for adding 32-bit registers. */ -#define REG32(address, ...) \ - { .addr = address, __VA_ARGS__ } +#define REG32(_reg, ...) \ + { .addr = (_reg), __VA_ARGS__ } /* * Convenience macro for adding 64-bit registers. @@ -423,8 +423,13 @@ struct drm_i915_reg_descriptor { * access commands only allow 32-bit accesses. Hence, we have to include * entries for both halves of the 64-bit registers. */ -#define REG64(addr) \ - REG32(addr), REG32(addr + sizeof(u32)) +#define REG64(_reg) \ + { .addr = _reg }, \ + { .addr = _reg ## _UDW } + +#define REG64_IDX(_reg, idx) \ + { .addr = _reg(idx) }, \ + { .addr = _reg ## _UDW(idx) } static const struct drm_i915_reg_descriptor gen7_render_regs[] = { REG64(GPGPU_THREADS_DISPATCHED), @@ -451,14 +456,14 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = { REG32(GEN7_GPGPU_DISPATCHDIMX), REG32(GEN7_GPGPU_DISPATCHDIMY), REG32(GEN7_GPGPU_DISPATCHDIMZ), - REG64(GEN7_SO_NUM_PRIMS_WRITTEN(0)), - REG64(GEN7_SO_NUM_PRIMS_WRITTEN(1)), - REG64(GEN7_SO_NUM_PRIMS_WRITTEN(2)), - REG64(GEN7_SO_NUM_PRIMS_WRITTEN(3)), - REG64(GEN7_SO_PRIM_STORAGE_NEEDED(0)), - REG64(GEN7_SO_PRIM_STORAGE_NEEDED(1)), - REG64(GEN7_SO_PRIM_STORAGE_NEEDED(2)), - REG64(GEN7_SO_PRIM_STORAGE_NEEDED(3)), + REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 0), + REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 1), + REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 2), + REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 3), + REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 0), + REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 1), + REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 2), + REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 3), REG32(GEN7_SO_WRITE_OFFSET(0)), REG32(GEN7_SO_WRITE_OFFSET(1)), REG32(GEN7_SO_WRITE_OFFSET(2)), @@ -592,7 +597,7 @@ static bool check_sorted(int ring_id, bool ret = true; for (i = 0; i < reg_count; i++) { - u32 curr = reg_table[i].addr; + u32 curr = i915_mmio_reg_offset(reg_table[i].addr); if (curr < previous) { DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n", @@ -847,7 +852,7 @@ find_reg(const struct drm_i915_reg_descriptor *table, int i; for (i = 0; i < count; i++) { - if (table[i].addr == addr) + if (i915_mmio_reg_offset(table[i].addr) == addr) return &table[i]; } } @@ -1023,7 +1028,7 @@ static bool check_cmd(const struct intel_engine_cs *ring, * to the register. Hence, limit OACONTROL writes to * only MI_LOAD_REGISTER_IMM commands. 
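 *
 * (reg_addr here is a raw u32 offset parsed out of the command stream,
 * which is why it is compared against i915_mmio_reg_offset(OACONTROL)
 * below rather than against the register directly. i915_reg_t, defined
 * outside this excerpt, is assumed to be roughly
 *
 *	typedef struct { u32 reg; } i915_reg_t;
 *
 * so that raw offsets and typed registers can no longer be mixed
 * without an explicit i915_mmio_reg_offset() unwrap.)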
*/ - if (reg_addr == OACONTROL) { + if (reg_addr == i915_mmio_reg_offset(OACONTROL)) { if (desc->cmd.value == MI_LOAD_REGISTER_MEM) { DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n"); return false; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index a3b22bdacd44..411a9c68b4ee 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1252,18 +1252,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused) max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 : rp_state_cap >> 16) & 0xff; - max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1); + max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ? + GEN9_FREQ_SCALER : 1); seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", intel_gpu_freq(dev_priv, max_freq)); max_freq = (rp_state_cap & 0xff00) >> 8; - max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1); + max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ? + GEN9_FREQ_SCALER : 1); seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", intel_gpu_freq(dev_priv, max_freq)); max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 : rp_state_cap >> 0) & 0xff; - max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1); + max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ? + GEN9_FREQ_SCALER : 1); seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", intel_gpu_freq(dev_priv, max_freq)); seq_printf(m, "Max overclocked frequency: %dMHz\n", @@ -1523,7 +1526,7 @@ static int gen6_drpc_info(struct seq_file *m) seq_printf(m, "RC information accurate: %s\n", yesno(count < 51)); } - gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS); + gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS); trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true); rpmodectl1 = I915_READ(GEN6_RP_CONTROL); @@ -1640,7 +1643,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused) seq_puts(m, "FBC enabled\n"); else seq_printf(m, "FBC disabled: %s\n", - intel_no_fbc_reason_str(dev_priv->fbc.no_fbc_reason)); + dev_priv->fbc.no_fbc_reason); if (INTEL_INFO(dev_priv)->gen >= 7) seq_printf(m, "Compressing: %s\n", @@ -1801,7 +1804,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) if (ret) goto out; - if (IS_SKYLAKE(dev)) { + if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { /* Convert GT frequency to 50 HZ units */ min_gpu_freq = dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER; @@ -1821,7 +1824,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) &ia_freq); seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", intel_gpu_freq(dev_priv, (gpu_freq * - (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1))), + (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ? 
+ GEN9_FREQ_SCALER : 1))), ((ia_freq >> 0) & 0xff) * 100, ((ia_freq >> 8) & 0xff) * 100); } @@ -1873,17 +1877,19 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) struct drm_i915_private *dev_priv = dev->dev_private; ifbdev = dev_priv->fbdev; - fb = to_intel_framebuffer(ifbdev->helper.fb); - - seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", - fb->base.width, - fb->base.height, - fb->base.depth, - fb->base.bits_per_pixel, - fb->base.modifier[0], - atomic_read(&fb->base.refcount.refcount)); - describe_obj(m, fb->obj); - seq_putc(m, '\n'); + if (ifbdev) { + fb = to_intel_framebuffer(ifbdev->helper.fb); + + seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", + fb->base.width, + fb->base.height, + fb->base.depth, + fb->base.bits_per_pixel, + fb->base.modifier[0], + atomic_read(&fb->base.refcount.refcount)); + describe_obj(m, fb->obj); + seq_putc(m, '\n'); + } #endif mutex_lock(&dev->mode_config.fb_lock); @@ -2402,6 +2408,12 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data) guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted); seq_printf(m, "\tversion found: %d.%d\n", guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found); + seq_printf(m, "\theader: offset is %d; size = %d\n", + guc_fw->header_offset, guc_fw->header_size); + seq_printf(m, "\tuCode: offset is %d; size = %d\n", + guc_fw->ucode_offset, guc_fw->ucode_size); + seq_printf(m, "\tRSA: offset is %d; size = %d\n", + guc_fw->rsa_offset, guc_fw->rsa_size); tmp = I915_READ(GUC_STATUS); @@ -2550,7 +2562,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) yesno(work_busy(&dev_priv->psr.work.work))); if (HAS_DDI(dev)) - enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE; + enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE; else { for_each_pipe(dev_priv, pipe) { stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) & @@ -2572,7 +2584,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) /* CHV PSR has no kind of performance counter */ if (HAS_DDI(dev)) { - psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) & + psrperf = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK; seq_printf(m, "Performance_Counter: %u\n", psrperf); @@ -2696,24 +2708,16 @@ static const char *power_domain_str(enum intel_display_power_domain domain) return "TRANSCODER_C"; case POWER_DOMAIN_TRANSCODER_EDP: return "TRANSCODER_EDP"; - case POWER_DOMAIN_PORT_DDI_A_2_LANES: - return "PORT_DDI_A_2_LANES"; - case POWER_DOMAIN_PORT_DDI_A_4_LANES: - return "PORT_DDI_A_4_LANES"; - case POWER_DOMAIN_PORT_DDI_B_2_LANES: - return "PORT_DDI_B_2_LANES"; - case POWER_DOMAIN_PORT_DDI_B_4_LANES: - return "PORT_DDI_B_4_LANES"; - case POWER_DOMAIN_PORT_DDI_C_2_LANES: - return "PORT_DDI_C_2_LANES"; - case POWER_DOMAIN_PORT_DDI_C_4_LANES: - return "PORT_DDI_C_4_LANES"; - case POWER_DOMAIN_PORT_DDI_D_2_LANES: - return "PORT_DDI_D_2_LANES"; - case POWER_DOMAIN_PORT_DDI_D_4_LANES: - return "PORT_DDI_D_4_LANES"; - case POWER_DOMAIN_PORT_DDI_E_2_LANES: - return "PORT_DDI_E_2_LANES"; + case POWER_DOMAIN_PORT_DDI_A_LANES: + return "PORT_DDI_A_LANES"; + case POWER_DOMAIN_PORT_DDI_B_LANES: + return "PORT_DDI_B_LANES"; + case POWER_DOMAIN_PORT_DDI_C_LANES: + return "PORT_DDI_C_LANES"; + case POWER_DOMAIN_PORT_DDI_D_LANES: + return "PORT_DDI_D_LANES"; + case POWER_DOMAIN_PORT_DDI_E_LANES: + return "PORT_DDI_E_LANES"; case POWER_DOMAIN_PORT_DSI: return "PORT_DSI"; case POWER_DOMAIN_PORT_CRT: @@ -2734,6 +2738,10 @@ static const char *power_domain_str(enum 
intel_display_power_domain domain) return "AUX_C"; case POWER_DOMAIN_AUX_D: return "AUX_D"; + case POWER_DOMAIN_GMBUS: + return "GMBUS"; + case POWER_DOMAIN_MODESET: + return "MODESET"; case POWER_DOMAIN_INIT: return "INIT"; default: @@ -2777,6 +2785,51 @@ static int i915_power_domain_info(struct seq_file *m, void *unused) return 0; } +static int i915_dmc_info(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = m->private; + struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_csr *csr; + + if (!HAS_CSR(dev)) { + seq_puts(m, "not supported\n"); + return 0; + } + + csr = &dev_priv->csr; + + intel_runtime_pm_get(dev_priv); + + seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL)); + seq_printf(m, "path: %s\n", csr->fw_path); + + if (!csr->dmc_payload) + goto out; + + seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version), + CSR_VERSION_MINOR(csr->version)); + + if (IS_SKYLAKE(dev) && csr->version >= CSR_VERSION(1, 6)) { + seq_printf(m, "DC3 -> DC5 count: %d\n", + I915_READ(SKL_CSR_DC3_DC5_COUNT)); + seq_printf(m, "DC5 -> DC6 count: %d\n", + I915_READ(SKL_CSR_DC5_DC6_COUNT)); + } else if (IS_BROXTON(dev) && csr->version >= CSR_VERSION(1, 4)) { + seq_printf(m, "DC3 -> DC5 count: %d\n", + I915_READ(BXT_CSR_DC3_DC5_COUNT)); + } + +out: + seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0))); + seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE)); + seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL)); + + intel_runtime_pm_put(dev_priv); + + return 0; +} + static void intel_seq_print_mode(struct seq_file *m, int tabs, struct drm_display_mode *mode) { @@ -2944,6 +2997,107 @@ static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y) return cursor_active(dev, pipe); } +static const char *plane_type(enum drm_plane_type type) +{ + switch (type) { + case DRM_PLANE_TYPE_OVERLAY: + return "OVL"; + case DRM_PLANE_TYPE_PRIMARY: + return "PRI"; + case DRM_PLANE_TYPE_CURSOR: + return "CUR"; + /* + * Deliberately omitting default: to generate compiler warnings + * when a new drm_plane_type gets added. + */ + } + + return "unknown"; +} + +static const char *plane_rotation(unsigned int rotation) +{ + static char buf[48]; + /* + * According to doc only one DRM_ROTATE_ is allowed but this + * will print them all to visualize if the values are misused + */ + snprintf(buf, sizeof(buf), + "%s%s%s%s%s%s(0x%08x)", + (rotation & BIT(DRM_ROTATE_0)) ? "0 " : "", + (rotation & BIT(DRM_ROTATE_90)) ? "90 " : "", + (rotation & BIT(DRM_ROTATE_180)) ? "180 " : "", + (rotation & BIT(DRM_ROTATE_270)) ? "270 " : "", + (rotation & BIT(DRM_REFLECT_X)) ? "FLIPX " : "", + (rotation & BIT(DRM_REFLECT_Y)) ? 
"FLIPY " : "", + rotation); + + return buf; +} + +static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc) +{ + struct drm_info_node *node = m->private; + struct drm_device *dev = node->minor->dev; + struct intel_plane *intel_plane; + + for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { + struct drm_plane_state *state; + struct drm_plane *plane = &intel_plane->base; + + if (!plane->state) { + seq_puts(m, "plane->state is NULL!\n"); + continue; + } + + state = plane->state; + + seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n", + plane->base.id, + plane_type(intel_plane->base.type), + state->crtc_x, state->crtc_y, + state->crtc_w, state->crtc_h, + (state->src_x >> 16), + ((state->src_x & 0xffff) * 15625) >> 10, + (state->src_y >> 16), + ((state->src_y & 0xffff) * 15625) >> 10, + (state->src_w >> 16), + ((state->src_w & 0xffff) * 15625) >> 10, + (state->src_h >> 16), + ((state->src_h & 0xffff) * 15625) >> 10, + state->fb ? drm_get_format_name(state->fb->pixel_format) : "N/A", + plane_rotation(state->rotation)); + } +} + +static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc) +{ + struct intel_crtc_state *pipe_config; + int num_scalers = intel_crtc->num_scalers; + int i; + + pipe_config = to_intel_crtc_state(intel_crtc->base.state); + + /* Not all platformas have a scaler */ + if (num_scalers) { + seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d", + num_scalers, + pipe_config->scaler_state.scaler_users, + pipe_config->scaler_state.scaler_id); + + for (i = 0; i < SKL_NUM_SCALERS; i++) { + struct intel_scaler *sc = + &pipe_config->scaler_state.scalers[i]; + + seq_printf(m, ", scalers[%d]: use=%s, mode=%x", + i, yesno(sc->in_use), sc->mode); + } + seq_puts(m, "\n"); + } else { + seq_puts(m, "\tNo scalers available on this platform\n"); + } +} + static int i915_display_info(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; @@ -2963,10 +3117,12 @@ static int i915_display_info(struct seq_file *m, void *unused) pipe_config = to_intel_crtc_state(crtc->base.state); - seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n", + seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n", crtc->base.base.id, pipe_name(crtc->pipe), yesno(pipe_config->base.active), - pipe_config->pipe_src_w, pipe_config->pipe_src_h); + pipe_config->pipe_src_w, pipe_config->pipe_src_h, + yesno(pipe_config->dither), pipe_config->pipe_bpp); + if (pipe_config->base.active) { intel_crtc_info(m, crtc); @@ -2976,6 +3132,8 @@ static int i915_display_info(struct seq_file *m, void *unused) x, y, crtc->base.cursor->state->crtc_w, crtc->base.cursor->state->crtc_h, crtc->cursor_addr, yesno(active)); + intel_scaler_info(m, crtc); + intel_plane_info(m, crtc); } seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n", @@ -3110,7 +3268,8 @@ static int i915_wa_registers(struct seq_file *m, void *unused) seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count); for (i = 0; i < dev_priv->workarounds.count; ++i) { - u32 addr, mask, value, read; + i915_reg_t addr; + u32 mask, value, read; bool ok; addr = dev_priv->workarounds.reg[i].addr; @@ -3119,7 +3278,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused) read = I915_READ(addr); ok = (value & mask) == (read & mask); seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n", - addr, value, mask, read, ok ? 
"OK" : "FAIL"); + i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL"); } intel_runtime_pm_put(dev_priv); @@ -5023,7 +5182,7 @@ static void gen9_sseu_device_status(struct drm_device *dev, stat->slice_total++; - if (IS_SKYLAKE(dev)) + if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) ss_cnt = INTEL_INFO(dev)->subslice_per_slice; for (ss = 0; ss < ss_max; ss++) { @@ -5236,6 +5395,7 @@ static const struct drm_info_list i915_debugfs_list[] = { {"i915_energy_uJ", i915_energy_uJ, 0}, {"i915_runtime_pm_status", i915_runtime_pm_status, 0}, {"i915_power_domain_info", i915_power_domain_info, 0}, + {"i915_dmc_info", i915_dmc_info, 0}, {"i915_display_info", i915_display_info, 0}, {"i915_semaphore_status", i915_semaphore_status, 0}, {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index b4741d121a74..a81c76603544 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -28,7 +28,6 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <linux/async.h> #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_helper.h> @@ -338,7 +337,7 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_ i915_resume_switcheroo(dev); dev->switch_power_state = DRM_SWITCH_POWER_ON; } else { - pr_err("switched off\n"); + pr_info("switched off\n"); dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; i915_suspend_switcheroo(dev, pmm); dev->switch_power_state = DRM_SWITCH_POWER_OFF; @@ -396,7 +395,9 @@ static int i915_load_modeset_init(struct drm_device *dev) if (ret) goto cleanup_vga_switcheroo; - intel_power_domains_init_hw(dev_priv); + intel_power_domains_init_hw(dev_priv, false); + + intel_csr_ucode_init(dev_priv); ret = intel_irq_install(dev_priv); if (ret) @@ -437,7 +438,7 @@ static int i915_load_modeset_init(struct drm_device *dev) * scanning against hotplug events. Hence do this first and ignore the * tiny window where we will loose hotplug notifactions. */ - async_schedule(intel_fbdev_initial_config, dev_priv); + intel_fbdev_initial_config_async(dev); drm_kms_helper_poll_init(dev); @@ -663,7 +664,8 @@ static void gen9_sseu_info_init(struct drm_device *dev) * supports EU power gating on devices with more than one EU * pair per subslice. 
*/ - info->has_slice_pg = (IS_SKYLAKE(dev) && (info->slice_total > 1)); + info->has_slice_pg = ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && + (info->slice_total > 1)); info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1)); info->has_eu_pg = (info->eu_per_subslice > 2); } @@ -890,7 +892,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) spin_lock_init(&dev_priv->mmio_flip_lock); mutex_init(&dev_priv->sb_lock); mutex_init(&dev_priv->modeset_restore_lock); - mutex_init(&dev_priv->csr_lock); mutex_init(&dev_priv->av_mutex); intel_pm_setup(dev); @@ -937,9 +938,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) intel_uncore_init(dev); - /* Load CSR Firmware for SKL */ - intel_csr_ucode_init(dev); - ret = i915_gem_gtt_init(dev); if (ret) goto out_freecsr; @@ -1113,7 +1111,7 @@ out_mtrrfree: out_gtt: i915_global_gtt_cleanup(dev); out_freecsr: - intel_csr_ucode_fini(dev); + intel_csr_ucode_fini(dev_priv); intel_uncore_fini(dev); pci_iounmap(dev->pdev, dev_priv->regs); put_bridge: @@ -1131,6 +1129,8 @@ int i915_driver_unload(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int ret; + intel_fbdev_fini(dev); + i915_audio_component_cleanup(dev_priv); ret = i915_gem_suspend(dev); @@ -1153,8 +1153,6 @@ int i915_driver_unload(struct drm_device *dev) acpi_video_unregister(); - intel_fbdev_fini(dev); - drm_vblank_cleanup(dev); intel_modeset_cleanup(dev); @@ -1196,7 +1194,7 @@ int i915_driver_unload(struct drm_device *dev) intel_fbc_cleanup_cfb(dev_priv); i915_gem_cleanup_stolen(dev); - intel_csr_ucode_fini(dev); + intel_csr_ucode_fini(dev_priv); intel_teardown_gmbus(dev); intel_teardown_mchbar(dev); @@ -1264,8 +1262,6 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; - if (file_priv && file_priv->bsd_ring) - file_priv->bsd_ring = NULL; kfree(file_priv); } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 760e0ce4aa26..6344dfb72177 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -383,6 +383,7 @@ static const struct intel_device_info intel_skylake_gt3_info = { static const struct intel_device_info intel_broxton_info = { .is_preliminary = 1, + .is_broxton = 1, .gen = 9, .need_gfx_hws = 1, .has_hotplug = 1, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, @@ -394,50 +395,81 @@ static const struct intel_device_info intel_broxton_info = { IVB_CURSOR_OFFSETS, }; +static const struct intel_device_info intel_kabylake_info = { + .is_preliminary = 1, + .is_kabylake = 1, + .gen = 9, + .num_pipes = 3, + .need_gfx_hws = 1, .has_hotplug = 1, + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, + .has_llc = 1, + .has_ddi = 1, + .has_fpga_dbg = 1, + .has_fbc = 1, + GEN_DEFAULT_PIPEOFFSETS, + IVB_CURSOR_OFFSETS, +}; + +static const struct intel_device_info intel_kabylake_gt3_info = { + .is_preliminary = 1, + .is_kabylake = 1, + .gen = 9, + .num_pipes = 3, + .need_gfx_hws = 1, .has_hotplug = 1, + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, + .has_llc = 1, + .has_ddi = 1, + .has_fpga_dbg = 1, + .has_fbc = 1, + GEN_DEFAULT_PIPEOFFSETS, + IVB_CURSOR_OFFSETS, +}; + /* * Make sure any device matches here are from most specific to most * general. For example, since the Quanta match is based on the subsystem * and subvendor IDs, we need it to come before the more general IVB * PCI ID matches, otherwise we'll use the wrong info struct above. 
*/ -#define INTEL_PCI_IDS \ - INTEL_I830_IDS(&intel_i830_info), \ - INTEL_I845G_IDS(&intel_845g_info), \ - INTEL_I85X_IDS(&intel_i85x_info), \ - INTEL_I865G_IDS(&intel_i865g_info), \ - INTEL_I915G_IDS(&intel_i915g_info), \ - INTEL_I915GM_IDS(&intel_i915gm_info), \ - INTEL_I945G_IDS(&intel_i945g_info), \ - INTEL_I945GM_IDS(&intel_i945gm_info), \ - INTEL_I965G_IDS(&intel_i965g_info), \ - INTEL_G33_IDS(&intel_g33_info), \ - INTEL_I965GM_IDS(&intel_i965gm_info), \ - INTEL_GM45_IDS(&intel_gm45_info), \ - INTEL_G45_IDS(&intel_g45_info), \ - INTEL_PINEVIEW_IDS(&intel_pineview_info), \ - INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), \ - INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), \ - INTEL_SNB_D_IDS(&intel_sandybridge_d_info), \ - INTEL_SNB_M_IDS(&intel_sandybridge_m_info), \ - INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \ - INTEL_IVB_M_IDS(&intel_ivybridge_m_info), \ - INTEL_IVB_D_IDS(&intel_ivybridge_d_info), \ - INTEL_HSW_D_IDS(&intel_haswell_d_info), \ - INTEL_HSW_M_IDS(&intel_haswell_m_info), \ - INTEL_VLV_M_IDS(&intel_valleyview_m_info), \ - INTEL_VLV_D_IDS(&intel_valleyview_d_info), \ - INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), \ - INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \ - INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \ - INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \ - INTEL_CHV_IDS(&intel_cherryview_info), \ - INTEL_SKL_GT1_IDS(&intel_skylake_info), \ - INTEL_SKL_GT2_IDS(&intel_skylake_info), \ - INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), \ - INTEL_BXT_IDS(&intel_broxton_info) - -static const struct pci_device_id pciidlist[] = { /* aka */ - INTEL_PCI_IDS, +static const struct pci_device_id pciidlist[] = { + INTEL_I830_IDS(&intel_i830_info), + INTEL_I845G_IDS(&intel_845g_info), + INTEL_I85X_IDS(&intel_i85x_info), + INTEL_I865G_IDS(&intel_i865g_info), + INTEL_I915G_IDS(&intel_i915g_info), + INTEL_I915GM_IDS(&intel_i915gm_info), + INTEL_I945G_IDS(&intel_i945g_info), + INTEL_I945GM_IDS(&intel_i945gm_info), + INTEL_I965G_IDS(&intel_i965g_info), + INTEL_G33_IDS(&intel_g33_info), + INTEL_I965GM_IDS(&intel_i965gm_info), + INTEL_GM45_IDS(&intel_gm45_info), + INTEL_G45_IDS(&intel_g45_info), + INTEL_PINEVIEW_IDS(&intel_pineview_info), + INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), + INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), + INTEL_SNB_D_IDS(&intel_sandybridge_d_info), + INTEL_SNB_M_IDS(&intel_sandybridge_m_info), + INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ + INTEL_IVB_M_IDS(&intel_ivybridge_m_info), + INTEL_IVB_D_IDS(&intel_ivybridge_d_info), + INTEL_HSW_D_IDS(&intel_haswell_d_info), + INTEL_HSW_M_IDS(&intel_haswell_m_info), + INTEL_VLV_M_IDS(&intel_valleyview_m_info), + INTEL_VLV_D_IDS(&intel_valleyview_d_info), + INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), + INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), + INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), + INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), + INTEL_CHV_IDS(&intel_cherryview_info), + INTEL_SKL_GT1_IDS(&intel_skylake_info), + INTEL_SKL_GT2_IDS(&intel_skylake_info), + INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), + INTEL_BXT_IDS(&intel_broxton_info), + INTEL_KBL_GT1_IDS(&intel_kabylake_info), + INTEL_KBL_GT2_IDS(&intel_kabylake_info), + INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info), + INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info), {0, 0, 0} }; @@ -463,7 +495,7 @@ static enum intel_pch intel_virt_detect_pch(struct drm_device *dev) } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { ret = PCH_LPT; DRM_DEBUG_KMS("Assuming LynxPoint PCH\n"); - } else if (IS_SKYLAKE(dev)) { + } 
else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { ret = PCH_SPT; DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n"); } @@ -526,11 +558,13 @@ void intel_detect_pch(struct drm_device *dev) } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_SPT; DRM_DEBUG_KMS("Found SunrisePoint PCH\n"); - WARN_ON(!IS_SKYLAKE(dev)); + WARN_ON(!IS_SKYLAKE(dev) && + !IS_KABYLAKE(dev)); } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_SPT; DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); - WARN_ON(!IS_SKYLAKE(dev)); + WARN_ON(!IS_SKYLAKE(dev) && + !IS_KABYLAKE(dev)); } else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE) { dev_priv->pch_type = intel_virt_detect_pch(dev); } else @@ -570,26 +604,6 @@ bool i915_semaphore_is_enabled(struct drm_device *dev) return true; } -void i915_firmware_load_error_print(const char *fw_path, int err) -{ - DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err); - - /* - * If the reason is not known assume -ENOENT since that's the most - * usual failure mode. - */ - if (!err) - err = -ENOENT; - - if (!(IS_BUILTIN(CONFIG_DRM_I915) && err == -ENOENT)) - return; - - DRM_ERROR( - "The driver is built-in, so to load the firmware you need to\n" - "include it either in the kernel (see CONFIG_EXTRA_FIRMWARE) or\n" - "in your initrd/initramfs image.\n"); -} - static void intel_suspend_encoders(struct drm_i915_private *dev_priv) { struct drm_device *dev = dev_priv->dev; @@ -608,7 +622,6 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv) static int intel_suspend_complete(struct drm_i915_private *dev_priv); static int vlv_resume_prepare(struct drm_i915_private *dev_priv, bool rpm_resume); -static int skl_resume_prepare(struct drm_i915_private *dev_priv); static int bxt_resume_prepare(struct drm_i915_private *dev_priv); @@ -679,6 +692,9 @@ static int i915_drm_suspend(struct drm_device *dev) intel_display_set_init_power(dev_priv, false); + if (HAS_CSR(dev_priv)) + flush_work(&dev_priv->csr.work); + return 0; } @@ -687,10 +703,13 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation) struct drm_i915_private *dev_priv = drm_dev->dev_private; int ret; + intel_power_domains_suspend(dev_priv); + ret = intel_suspend_complete(dev_priv); if (ret) { DRM_ERROR("Suspend complete failed: %d\n", ret); + intel_power_domains_init_hw(dev_priv, true); return ret; } @@ -838,13 +857,11 @@ static int i915_drm_resume_early(struct drm_device *dev) if (IS_BROXTON(dev)) ret = bxt_resume_prepare(dev_priv); - else if (IS_SKYLAKE(dev_priv)) - ret = skl_resume_prepare(dev_priv); else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) hsw_disable_pc8(dev_priv); intel_uncore_sanitize(dev); - intel_power_domains_init_hw(dev_priv); + intel_power_domains_init_hw(dev_priv, true); return ret; } @@ -1051,15 +1068,6 @@ static int i915_pm_resume(struct device *dev) return i915_drm_resume(drm_dev); } -static int skl_suspend_complete(struct drm_i915_private *dev_priv) -{ - /* Enabling DC6 is not a hard requirement to enter runtime D3 */ - - skl_uninit_cdclk(dev_priv); - - return 0; -} - static int hsw_suspend_complete(struct drm_i915_private *dev_priv) { hsw_enable_pc8(dev_priv); @@ -1099,16 +1107,6 @@ static int bxt_resume_prepare(struct drm_i915_private *dev_priv) return 0; } -static int skl_resume_prepare(struct drm_i915_private *dev_priv) -{ - struct drm_device *dev = dev_priv->dev; - - skl_init_cdclk(dev_priv); - intel_csr_load_program(dev); - - return 0; -} - /* * Save all Gunit registers that may be lost after a D3 and a subsequent * 
S0i[R123] transition. The list of registers needing a save/restore is @@ -1572,8 +1570,6 @@ static int intel_runtime_resume(struct device *device) if (IS_BROXTON(dev)) ret = bxt_resume_prepare(dev_priv); - else if (IS_SKYLAKE(dev)) - ret = skl_resume_prepare(dev_priv); else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) hsw_disable_pc8(dev_priv); else if (IS_VALLEYVIEW(dev_priv)) @@ -1616,8 +1612,6 @@ static int intel_suspend_complete(struct drm_i915_private *dev_priv) if (IS_BROXTON(dev_priv)) ret = bxt_suspend_complete(dev_priv); - else if (IS_SKYLAKE(dev_priv)) - ret = skl_suspend_complete(dev_priv); else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) ret = hsw_suspend_complete(dev_priv); else if (IS_VALLEYVIEW(dev_priv)) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 95bb27de774f..9dbc143cac27 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -57,7 +57,7 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20151010" +#define DRIVER_DATE "20151120" #undef WARN_ON /* Many gcc seem to not see through this and fall over :( */ @@ -180,15 +180,11 @@ enum intel_display_power_domain { POWER_DOMAIN_TRANSCODER_B, POWER_DOMAIN_TRANSCODER_C, POWER_DOMAIN_TRANSCODER_EDP, - POWER_DOMAIN_PORT_DDI_A_2_LANES, - POWER_DOMAIN_PORT_DDI_A_4_LANES, - POWER_DOMAIN_PORT_DDI_B_2_LANES, - POWER_DOMAIN_PORT_DDI_B_4_LANES, - POWER_DOMAIN_PORT_DDI_C_2_LANES, - POWER_DOMAIN_PORT_DDI_C_4_LANES, - POWER_DOMAIN_PORT_DDI_D_2_LANES, - POWER_DOMAIN_PORT_DDI_D_4_LANES, - POWER_DOMAIN_PORT_DDI_E_2_LANES, + POWER_DOMAIN_PORT_DDI_A_LANES, + POWER_DOMAIN_PORT_DDI_B_LANES, + POWER_DOMAIN_PORT_DDI_C_LANES, + POWER_DOMAIN_PORT_DDI_D_LANES, + POWER_DOMAIN_PORT_DDI_E_LANES, POWER_DOMAIN_PORT_DSI, POWER_DOMAIN_PORT_CRT, POWER_DOMAIN_PORT_OTHER, @@ -199,6 +195,8 @@ enum intel_display_power_domain { POWER_DOMAIN_AUX_B, POWER_DOMAIN_AUX_C, POWER_DOMAIN_AUX_D, + POWER_DOMAIN_GMBUS, + POWER_DOMAIN_MODESET, POWER_DOMAIN_INIT, POWER_DOMAIN_NUM, @@ -288,7 +286,7 @@ struct i915_hotplug { list_for_each_entry(intel_plane, \ &(dev)->mode_config.plane_list, \ base.head) \ - if ((intel_plane)->pipe == (intel_crtc)->pipe) + for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe) #define for_each_intel_crtc(dev, intel_crtc) \ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) @@ -305,15 +303,15 @@ struct i915_hotplug { #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ - if ((intel_encoder)->base.crtc == (__crtc)) + for_each_if ((intel_encoder)->base.crtc == (__crtc)) #define for_each_connector_on_encoder(dev, __encoder, intel_connector) \ list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \ - if ((intel_connector)->base.encoder == (__encoder)) + for_each_if ((intel_connector)->base.encoder == (__encoder)) #define for_each_power_domain(domain, mask) \ for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ - if ((1 << (domain)) & (mask)) + for_each_if ((1 << (domain)) & (mask)) struct drm_i915_private; struct i915_mm_struct; @@ -630,11 +628,9 @@ struct drm_i915_display_funcs { int target, int refclk, struct dpll *match_clock, struct dpll *best_clock); + int (*compute_pipe_wm)(struct intel_crtc *crtc, + struct drm_atomic_state *state); void (*update_wm)(struct drm_crtc *crtc); - void (*update_sprite_wm)(struct drm_plane *plane, - struct drm_crtc *crtc, - uint32_t sprite_width,
uint32_t sprite_height, - int pixel_size, bool enable, bool scaled); int (*modeset_calc_cdclk)(struct drm_atomic_state *state); void (*modeset_commit_cdclk)(struct drm_atomic_state *state); /* Returns the active state of the crtc, and if the crtc is active, @@ -692,18 +688,18 @@ struct intel_uncore_funcs { void (*force_wake_put)(struct drm_i915_private *dev_priv, enum forcewake_domains domains); - uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace); - uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace); - uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace); - uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace); + uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace); + uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace); + uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace); + uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace); - void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset, + void (*mmio_writeb)(struct drm_i915_private *dev_priv, i915_reg_t r, uint8_t val, bool trace); - void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset, + void (*mmio_writew)(struct drm_i915_private *dev_priv, i915_reg_t r, uint16_t val, bool trace); - void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset, + void (*mmio_writel)(struct drm_i915_private *dev_priv, i915_reg_t r, uint32_t val, bool trace); - void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset, + void (*mmio_writeq)(struct drm_i915_private *dev_priv, i915_reg_t r, uint64_t val, bool trace); }; @@ -720,11 +716,11 @@ struct intel_uncore { enum forcewake_domain_id id; unsigned wake_count; struct timer_list timer; - u32 reg_set; + i915_reg_t reg_set; u32 val_set; u32 val_clear; - u32 reg_ack; - u32 reg_post; + i915_reg_t reg_ack; + i915_reg_t reg_post; u32 val_reset; } fw_domain[FW_DOMAIN_ID_COUNT]; }; @@ -734,25 +730,24 @@ struct intel_uncore { for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \ (i__) < FW_DOMAIN_ID_COUNT; \ (i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \ - if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__))) + for_each_if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__))) #define for_each_fw_domain(domain__, dev_priv__, i__) \ for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__) -enum csr_state { - FW_UNINITIALIZED = 0, - FW_LOADED, - FW_FAILED -}; +#define CSR_VERSION(major, minor) ((major) << 16 | (minor)) +#define CSR_VERSION_MAJOR(version) ((version) >> 16) +#define CSR_VERSION_MINOR(version) ((version) & 0xffff) struct intel_csr { + struct work_struct work; const char *fw_path; uint32_t *dmc_payload; uint32_t dmc_fw_size; + uint32_t version; uint32_t mmio_count; - uint32_t mmioaddr[8]; + i915_reg_t mmioaddr[8]; uint32_t mmiodata[8]; - enum csr_state state; }; #define DEV_INFO_FOR_EACH_FLAG(func, sep) \ @@ -770,6 +765,8 @@ struct intel_csr { func(is_valleyview) sep \ func(is_haswell) sep \ func(is_skylake) sep \ + func(is_broxton) sep \ + func(is_kabylake) sep \ func(is_preliminary) sep \ func(has_fbc) sep \ func(has_pipe_cxsr) sep \ @@ -928,24 +925,7 @@ struct i915_fbc { struct drm_framebuffer *fb; } *fbc_work; - enum no_fbc_reason { - FBC_OK, /* FBC is enabled */ - FBC_UNSUPPORTED, /* FBC is not supported by this chipset */ - FBC_NO_OUTPUT, /* no outputs 
enabled to compress */ - FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */ - FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */ - FBC_MODE_TOO_LARGE, /* mode too large for compression */ - FBC_BAD_PLANE, /* fbc not supported on plane */ - FBC_NOT_TILED, /* buffer not tiled */ - FBC_MULTIPLE_PIPES, /* more than one pipe active */ - FBC_MODULE_PARAM, - FBC_CHIP_DEFAULT, /* disabled by default on this chip */ - FBC_ROTATION, /* rotation is not supported */ - FBC_IN_DBG_MASTER, /* kernel debugger is active */ - FBC_BAD_STRIDE, /* stride is not supported */ - FBC_PIXEL_RATE, /* pixel rate is too big */ - FBC_PIXEL_FORMAT /* pixel format is invalid */ - } no_fbc_reason; + const char *no_fbc_reason; bool (*fbc_enabled)(struct drm_i915_private *dev_priv); void (*enable_fbc)(struct intel_crtc *crtc); @@ -1019,7 +999,7 @@ struct intel_gmbus { struct i2c_adapter adapter; u32 force_bit; u32 reg0; - u32 gpio_reg; + i915_reg_t gpio_reg; struct i2c_algo_bit_data bit_algo; struct drm_i915_private *dev_priv; }; @@ -1668,7 +1648,7 @@ struct i915_frontbuffer_tracking { }; struct i915_wa_reg { - u32 addr; + i915_reg_t addr; u32 value; /* bitmask representing WA bits */ u32 mask; @@ -1697,6 +1677,13 @@ struct i915_execbuffer_params { struct drm_i915_gem_request *request; }; +/* used in computing the new watermarks state */ +struct intel_wm_config { + unsigned int num_pipes_active; + bool sprites_enabled; + bool sprites_scaled; +}; + struct drm_i915_private { struct drm_device *dev; struct kmem_cache *objects; @@ -1717,9 +1704,6 @@ struct drm_i915_private { struct intel_csr csr; - /* Display CSR-related protection */ - struct mutex csr_lock; - struct intel_gmbus gmbus[GMBUS_NUM_PINS]; /** gmbus_mutex protects against concurrent usage of the single hw gmbus @@ -1734,6 +1718,8 @@ struct drm_i915_private { /* MMIO base address for MIPI regs */ uint32_t mipi_mmio_base; + uint32_t psr_mmio_base; + wait_queue_head_t gmbus_wait_queue; struct pci_dev *bridge_dev; @@ -1921,6 +1907,9 @@ struct drm_i915_private { */ uint16_t skl_latency[8]; + /* Committed wm config */ + struct intel_wm_config config; + /* * The skl_wm_values structure is a bit too big for stack * allocation, so we keep the staging struct where we store @@ -1955,6 +1944,8 @@ struct drm_i915_private { /* perform PHY state sanity checks? */ bool chv_phy_assert[2]; + struct intel_encoder *dig_port_map[I915_MAX_PORTS]; + /* * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch * will be rejected. Instead look for a better place. @@ -1979,7 +1970,7 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc) /* Iterate over initialised rings */ #define for_each_ring(ring__, dev_priv__, i__) \ for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \ - if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__))) + for_each_if ((((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))) enum hdmi_force_audio { HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */ @@ -2435,6 +2426,15 @@ struct drm_i915_cmd_table { #define INTEL_DEVID(p) (INTEL_INFO(p)->device_id) #define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision) +#define REVID_FOREVER 0xff +/* + * Return true if revision is in range [since,until] inclusive. + * + * Use 0 for open-ended since, and REVID_FOREVER for open-ended until. 
+ */ +#define IS_REVID(p, since, until) \ + (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until)) + #define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577) #define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562) #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) @@ -2461,7 +2461,8 @@ struct drm_i915_cmd_table { #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) #define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) #define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake) -#define IS_BROXTON(dev) (!INTEL_INFO(dev)->is_skylake && IS_GEN9(dev)) +#define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton) +#define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake) #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) @@ -2496,16 +2497,21 @@ struct drm_i915_cmd_table { #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) -#define SKL_REVID_A0 (0x0) -#define SKL_REVID_B0 (0x1) -#define SKL_REVID_C0 (0x2) -#define SKL_REVID_D0 (0x3) -#define SKL_REVID_E0 (0x4) -#define SKL_REVID_F0 (0x5) +#define SKL_REVID_A0 0x0 +#define SKL_REVID_B0 0x1 +#define SKL_REVID_C0 0x2 +#define SKL_REVID_D0 0x3 +#define SKL_REVID_E0 0x4 +#define SKL_REVID_F0 0x5 + +#define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until)) -#define BXT_REVID_A0 (0x0) -#define BXT_REVID_B0 (0x3) -#define BXT_REVID_C0 (0x9) +#define BXT_REVID_A0 0x0 +#define BXT_REVID_A1 0x1 +#define BXT_REVID_B0 0x3 +#define BXT_REVID_C0 0x9 + +#define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until)) /* * The genX designation typically refers to the render engine, so render @@ -2577,10 +2583,10 @@ struct drm_i915_cmd_table { #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) #define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \ IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \ - IS_SKYLAKE(dev)) + IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) #define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \ IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \ - IS_SKYLAKE(dev)) + IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) #define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) #define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) @@ -2640,6 +2646,7 @@ struct i915_params { int panel_use_ssc; int vbt_sdvo_panel_type; int enable_rc6; + int enable_dc; int enable_fbc; int enable_ppgtt; int enable_execlists; @@ -2688,7 +2695,6 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); -void i915_firmware_load_error_print(const char *fw_path, int err); /* intel_hotplug.c */ void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask); @@ -2995,8 +3001,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); int __must_check i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, u32 alignment, - struct intel_engine_cs *pipelined, - struct drm_i915_gem_request **pipelined_request, const struct i915_ggtt_view *view); void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj, const struct i915_ggtt_view *view); @@ -3351,7 +3355,6 @@ extern void intel_set_rps(struct drm_device *dev, u8 val); extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable); extern void 
intel_detect_pch(struct drm_device *dev); -extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); extern int intel_enable_rc6(const struct drm_device *dev); extern bool i915_semaphore_is_enabled(struct drm_device *dev); @@ -3434,6 +3437,32 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) +#define __raw_read(x, s) \ +static inline uint##x##_t __raw_i915_read##x(struct drm_i915_private *dev_priv, \ + i915_reg_t reg) \ +{ \ + return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \ +} + +#define __raw_write(x, s) \ +static inline void __raw_i915_write##x(struct drm_i915_private *dev_priv, \ + i915_reg_t reg, uint##x##_t val) \ +{ \ + write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \ +} +__raw_read(8, b) +__raw_read(16, w) +__raw_read(32, l) +__raw_read(64, q) + +__raw_write(8, b) +__raw_write(16, w) +__raw_write(32, l) +__raw_write(64, q) + +#undef __raw_read +#undef __raw_write + /* These are untraced mmio-accessors that are only valid to be used inside * critical sections inside IRQ handlers where forcewake is explicitly * controlled. @@ -3441,8 +3470,8 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); * Note: Should only be used between intel_uncore_forcewake_irqlock() and * intel_uncore_forcewake_irqunlock(). */ -#define I915_READ_FW(reg__) readl(dev_priv->regs + (reg__)) -#define I915_WRITE_FW(reg__, val__) writel(val__, dev_priv->regs + (reg__)) +#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__)) +#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__)) #define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__) /* "Broadcast RGB" property */ @@ -3450,7 +3479,7 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); #define INTEL_BROADCAST_RGB_FULL 1 #define INTEL_BROADCAST_RGB_LIMITED 2 -static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev) +static inline i915_reg_t i915_vgacntrl_reg(struct drm_device *dev) { if (IS_VALLEYVIEW(dev)) return VLV_VGACNTRL; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 91bb1fc27420..a6997a8a3eaa 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1210,8 +1210,16 @@ int __i915_wait_request(struct drm_i915_gem_request *req, if (i915_gem_request_completed(req, true)) return 0; - timeout_expire = timeout ? - jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0; + timeout_expire = 0; + if (timeout) { + if (WARN_ON(*timeout < 0)) + return -EINVAL; + + if (*timeout == 0) + return -ETIME; + + timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout); + } if (INTEL_INFO(dev_priv)->gen >= 6) gen6_rps_boost(dev_priv, rps, req->emitted_jiffies); @@ -2737,6 +2745,8 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv, static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, struct intel_engine_cs *ring) { + struct intel_ringbuffer *buffer; + while (!list_empty(&ring->active_list)) { struct drm_i915_gem_object *obj; @@ -2752,18 +2762,23 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, * are the ones that keep the context and ringbuffer backing objects * pinned in place.
*/ - while (!list_empty(&ring->execlist_queue)) { - struct drm_i915_gem_request *submit_req; - submit_req = list_first_entry(&ring->execlist_queue, - struct drm_i915_gem_request, - execlist_link); - list_del(&submit_req->execlist_link); + if (i915.enable_execlists) { + spin_lock_irq(&ring->execlist_lock); + while (!list_empty(&ring->execlist_queue)) { + struct drm_i915_gem_request *submit_req; - if (submit_req->ctx != ring->default_context) - intel_lr_context_unpin(submit_req); + submit_req = list_first_entry(&ring->execlist_queue, + struct drm_i915_gem_request, + execlist_link); + list_del(&submit_req->execlist_link); - i915_gem_request_unreference(submit_req); + if (submit_req->ctx != ring->default_context) + intel_lr_context_unpin(submit_req); + + i915_gem_request_unreference(submit_req); + } + spin_unlock_irq(&ring->execlist_lock); } /* @@ -2782,6 +2797,18 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, i915_gem_request_retire(request); } + + /* Having flushed all requests from all queues, we know that all + * ringbuffers must now be empty. However, since we do not reclaim + * all space when retiring the request (to prevent HEADs colliding + * with rapid ringbuffer wraparound) the amount of available space + * upon reset is less than when we start. Do one more pass over + * all the ringbuffers to reset last_retired_head. + */ + list_for_each_entry(buffer, &ring->buffers, link) { + buffer->last_retired_head = buffer->tail; + intel_ring_update_space(buffer); + } } void i915_gem_reset(struct drm_device *dev) @@ -3826,7 +3853,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, * cacheline, whereas normally such cachelines would get * invalidated. */ - if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) + if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) return -ENODEV; level = I915_CACHE_LLC; @@ -3869,17 +3896,11 @@ rpm_put: int i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, u32 alignment, - struct intel_engine_cs *pipelined, - struct drm_i915_gem_request **pipelined_request, const struct i915_ggtt_view *view) { u32 old_read_domains, old_write_domain; int ret; - ret = i915_gem_object_sync(obj, pipelined, pipelined_request); - if (ret) - return ret; - /* Mark the pin_display early so that we account for the * display coherency whilst setting up the cache domains. */ @@ -4476,10 +4497,8 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, { struct i915_vma *vma; list_for_each_entry(vma, &obj->vma_list, vma_link) { - if (i915_is_ggtt(vma->vm) && - vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) - continue; - if (vma->vm == vm) + if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL && + vma->vm == vm) return vma; } return NULL; @@ -4568,7 +4587,6 @@ int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice) struct intel_engine_cs *ring = req->ring; struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200); u32 *remap_info = dev_priv->l3_parity.remap_info[slice]; int i, ret; @@ -4584,10 +4602,10 @@ int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice) * here because no other code should access these registers other than * at initialization time. 
*/ - for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) { + for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) { intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit(ring, reg_base + i); - intel_ring_emit(ring, remap_info[i/4]); + intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i)); + intel_ring_emit(ring, remap_info[i]); } intel_ring_advance(ring); @@ -4755,18 +4773,9 @@ i915_gem_init_hw(struct drm_device *dev) if (HAS_GUC_UCODE(dev)) { ret = intel_guc_ucode_load(dev); if (ret) { - /* - * If we got an error and GuC submission is enabled, map - * the error to -EIO so the GPU will be declared wedged. - * OTOH, if we didn't intend to use the GuC anyway, just - * discard the error and carry on. - */ - DRM_ERROR("Failed to initialize GuC, error %d%s\n", ret, - i915.enable_guc_submission ? "" : - " (ignored)"); - ret = i915.enable_guc_submission ? -EIO : 0; - if (ret) - goto out; + DRM_ERROR("Failed to initialize GuC, error %d\n", ret); + ret = -EIO; + goto out; } } diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 8c688a5f1589..4b9400402aa3 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -556,7 +556,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) if (signaller == ring) continue; - intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base)); + intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base)); intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)); } } @@ -581,7 +581,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) if (signaller == ring) continue; - intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base)); + intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base)); intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)); } } @@ -925,6 +925,14 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, case I915_CONTEXT_PARAM_NO_ZEROMAP: args->value = ctx->flags & CONTEXT_NO_ZEROMAP; break; + case I915_CONTEXT_PARAM_GTT_SIZE: + if (ctx->ppgtt) + args->value = ctx->ppgtt->base.total; + else if (to_i915(dev)->mm.aliasing_ppgtt) + args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total; + else + args->value = to_i915(dev)->gtt.base.total; + break; default: ret = -EINVAL; break; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 6ed7d63a0688..a4c243cec4aa 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1114,7 +1114,7 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev, for (i = 0; i < 4; i++) { intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i)); + intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i)); intel_ring_emit(ring, 0); } @@ -1241,7 +1241,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, intel_ring_emit(ring, MI_NOOP); intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit(ring, INSTPM); + intel_ring_emit_reg(ring, INSTPM); intel_ring_emit(ring, instp_mask << 16 | instp_mode); intel_ring_advance(ring); diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c index 40a10b25956c..598198543dcd 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence.c +++ b/drivers/gpu/drm/i915/i915_gem_fence.c @@ -59,7 +59,7 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg, struct drm_i915_gem_object *obj) { struct drm_i915_private *dev_priv = dev->dev_private; - int 
fence_reg_lo, fence_reg_hi; + i915_reg_t fence_reg_lo, fence_reg_hi; int fence_pitch_shift; if (INTEL_INFO(dev)->gen >= 6) { @@ -642,11 +642,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) } /* check for L-shaped memory aka modified enhanced addressing */ - if (IS_GEN4(dev)) { - uint32_t ddc2 = I915_READ(DCC2); - - if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE)) - dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES; + if (IS_GEN4(dev) && + !(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) { + swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; + swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; } if (dcc == 0xffffffff) { @@ -675,16 +674,35 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) * matching, which was the case for the swizzling required in * the table above, or from the 1-ch value being less than * the minimum size of a rank. + * + * Reports indicate that the swizzling actually + * varies depending upon page placement inside the + * channels, i.e. we see swizzled pages where the + * banks of memory are paired and unswizzled on the + * uneven portion, so leave that as unknown. */ - if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) { - swizzle_x = I915_BIT_6_SWIZZLE_NONE; - swizzle_y = I915_BIT_6_SWIZZLE_NONE; - } else { + if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) { swizzle_x = I915_BIT_6_SWIZZLE_9_10; swizzle_y = I915_BIT_6_SWIZZLE_9; } } + if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN || + swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) { + /* Userspace likes to explode if it sees unknown swizzling, + * so lie. We will finish the lie when reporting through + * the get-tiling-ioctl by reporting the physical swizzle + * mode as unknown instead. + * + * As we don't strictly know what the swizzling is, it may be + * bit17 dependent, and so we need to also prevent the pages + * from being moved. + */ + dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES; + swizzle_x = I915_BIT_6_SWIZZLE_NONE; + swizzle_y = I915_BIT_6_SWIZZLE_NONE; + } + dev_priv->mm.bit_6_swizzle_x = swizzle_x; dev_priv->mm.bit_6_swizzle_y = swizzle_y; } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 43f35d12b677..1f7e6b9df45d 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -24,6 +24,7 @@ */ #include <linux/seq_file.h> +#include <linux/stop_machine.h> #include <drm/drmP.h> #include <drm/i915_drm.h> #include "i915_drv.h" @@ -104,9 +105,11 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) { bool has_aliasing_ppgtt; bool has_full_ppgtt; + bool has_full_48bit_ppgtt; has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6; has_full_ppgtt = INTEL_INFO(dev)->gen >= 7; + has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9; if (intel_vgpu_active(dev)) has_full_ppgtt = false; /* emulation is too hard */ @@ -125,6 +128,9 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) if (enable_ppgtt == 2 && has_full_ppgtt) return 2; + if (enable_ppgtt == 3 && has_full_48bit_ppgtt) + return 3; + #ifdef CONFIG_INTEL_IOMMU /* Disable ppgtt on SNB if VT-d is on. */ if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) { @@ -141,7 +147,7 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) } if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists) - return 2; + return has_full_48bit_ppgtt ? 3 : 2; else return has_aliasing_ppgtt ? 
1 : 0; } @@ -661,10 +667,10 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req, return ret; intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry)); + intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(ring, entry)); intel_ring_emit(ring, upper_32_bits(addr)); intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry)); + intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(ring, entry)); intel_ring_emit(ring, lower_32_bits(addr)); intel_ring_advance(ring); @@ -904,14 +910,13 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) enum vgt_g2v_type msg; struct drm_device *dev = ppgtt->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - unsigned int offset = vgtif_reg(pdp0_lo); int i; if (USES_FULL_48BIT_PPGTT(dev)) { u64 daddr = px_dma(&ppgtt->pml4); - I915_WRITE(offset, lower_32_bits(daddr)); - I915_WRITE(offset + 4, upper_32_bits(daddr)); + I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr)); + I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr)); msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE : VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY); @@ -919,10 +924,8 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) for (i = 0; i < GEN8_LEGACY_PDPES; i++) { u64 daddr = i915_page_dir_dma_addr(ppgtt, i); - I915_WRITE(offset, lower_32_bits(daddr)); - I915_WRITE(offset + 4, upper_32_bits(daddr)); - - offset += 8; + I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr)); + I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr)); } msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE : @@ -1662,9 +1665,9 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, return ret; intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2)); - intel_ring_emit(ring, RING_PP_DIR_DCLV(ring)); + intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring)); intel_ring_emit(ring, PP_DIR_DCLV_2G); - intel_ring_emit(ring, RING_PP_DIR_BASE(ring)); + intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring)); intel_ring_emit(ring, get_pd_offset(ppgtt)); intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); @@ -1699,9 +1702,9 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, return ret; intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2)); - intel_ring_emit(ring, RING_PP_DIR_DCLV(ring)); + intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring)); intel_ring_emit(ring, PP_DIR_DCLV_2G); - intel_ring_emit(ring, RING_PP_DIR_BASE(ring)); + intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring)); intel_ring_emit(ring, get_pd_offset(ppgtt)); intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); @@ -2528,6 +2531,26 @@ static int ggtt_bind_vma(struct i915_vma *vma, return 0; } +struct ggtt_bind_vma__cb { + struct i915_vma *vma; + enum i915_cache_level cache_level; + u32 flags; +}; + +static int ggtt_bind_vma__cb(void *_arg) +{ + struct ggtt_bind_vma__cb *arg = _arg; + return ggtt_bind_vma(arg->vma, arg->cache_level, arg->flags); +} + +static int ggtt_bind_vma__BKL(struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags) +{ + struct ggtt_bind_vma__cb arg = { vma, cache_level, flags }; + return stop_machine(ggtt_bind_vma__cb, &arg, NULL); +} + static int aliasing_gtt_bind_vma(struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags) @@ -2995,6 +3018,9 @@ static int gen8_gmch_probe(struct drm_device *dev, dev_priv->gtt.base.bind_vma = ggtt_bind_vma; dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma; + if (IS_CHERRYVIEW(dev)) + dev_priv->gtt.base.bind_vma = ggtt_bind_vma__BKL; + return ret; } @@ 
-3302,7 +3328,7 @@ static struct sg_table * intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view, struct drm_i915_gem_object *obj) { - struct intel_rotation_info *rot_info = &ggtt_view->rotation_info; + struct intel_rotation_info *rot_info = &ggtt_view->params.rotation_info; unsigned int size_pages = rot_info->size >> PAGE_SHIFT; unsigned int size_pages_uv; struct sg_page_iter sg_iter; @@ -3534,7 +3560,7 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj, if (view->type == I915_GGTT_VIEW_NORMAL) { return obj->base.size; } else if (view->type == I915_GGTT_VIEW_ROTATED) { - return view->rotation_info.size; + return view->params.rotation_info.size; } else if (view->type == I915_GGTT_VIEW_PARTIAL) { return view->params.partial.size << PAGE_SHIFT; } else { diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index a216397ead52..877c32c78a6a 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -156,13 +156,10 @@ struct i915_ggtt_view { u64 offset; unsigned int size; } partial; + struct intel_rotation_info rotation_info; } params; struct sg_table *pages; - - union { - struct intel_rotation_info rotation_info; - }; }; extern const struct i915_ggtt_view i915_ggtt_view_normal; @@ -556,7 +553,7 @@ i915_ggtt_view_equal(const struct i915_ggtt_view *a, if (a->type != b->type) return false; - if (a->type == I915_GGTT_VIEW_PARTIAL) + if (a->type != I915_GGTT_VIEW_NORMAL) return !memcmp(&a->params, &b->params, sizeof(a->params)); return true; } diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index cdacf3f5b77a..598ed2facf85 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -433,7 +433,8 @@ int i915_gem_init_stolen(struct drm_device *dev) &reserved_size); break; default: - if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv)) + if (IS_BROADWELL(dev_priv) || + IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev)) bdw_get_stolen_reserved(dev_priv, &reserved_base, &reserved_size); else diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 8a6717cc265c..7410f6c962e7 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -176,6 +176,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, return -EINVAL; } + intel_runtime_pm_get(dev_priv); + mutex_lock(&dev->struct_mutex); if (obj->pin_display || obj->framebuffer_references) { ret = -EBUSY; @@ -269,6 +271,8 @@ err: drm_gem_object_unreference(&obj->base); mutex_unlock(&dev->struct_mutex); + intel_runtime_pm_put(dev_priv); + return ret; } diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 2f04e4f2ff35..06ca4082735b 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -366,6 +366,17 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, err_printf(m, "Suspend count: %u\n", error->suspend_count); err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device); err_printf(m, "IOMMU enabled?: %d\n", error->iommu); + + if (HAS_CSR(dev)) { + struct intel_csr *csr = &dev_priv->csr; + + err_printf(m, "DMC loaded: %s\n", + yesno(csr->dmc_payload != NULL)); + err_printf(m, "DMC fw version: %d.%d\n", + CSR_VERSION_MAJOR(csr->version), + CSR_VERSION_MINOR(csr->version)); + } + err_printf(m, "EIR: 0x%08x\n", error->eir); err_printf(m, "IER: 0x%08x\n", error->ier); if (INTEL_INFO(dev)->gen >= 8) { @@ -862,7 +873,7 @@ static void 
i915_record_ring_state(struct drm_device *dev, struct drm_i915_private *dev_priv = dev->dev_private; if (INTEL_INFO(dev)->gen >= 6) { - ering->rc_psmi = I915_READ(ring->mmio_base + 0x50); + ering->rc_psmi = I915_READ(RING_PSMI_CTL(ring->mmio_base)); ering->fault_reg = I915_READ(RING_FAULT_REG(ring)); if (INTEL_INFO(dev)->gen >= 8) gen8_record_semaphore_state(dev_priv, error, ring, ering); @@ -899,7 +910,7 @@ static void i915_record_ring_state(struct drm_device *dev, ering->ctl = I915_READ_CTL(ring); if (I915_NEED_GFX_HWS(dev)) { - int mmio; + i915_reg_t mmio; if (IS_GEN7(dev)) { switch (ring->id) { @@ -1071,6 +1082,25 @@ static void i915_gem_record_rings(struct drm_device *dev, list_for_each_entry(request, &ring->request_list, list) { struct drm_i915_error_request *erq; + if (count >= error->ring[i].num_requests) { + /* + * If the ring request list was changed in + * between the point where the error request + * list was created and dimensioned and this + * point then just exit early to avoid crashes. + * + * We don't need to communicate that the + * request list changed state during error + * state capture and that the error state is + * slightly incorrect as a consequence since we + * are typically only interested in the request + * list state at the point of error state + * capture, not in any changes happening during + * the capture. + */ + break; + } + erq = &error->ring[i].requests[count++]; erq->seqno = request->seqno; erq->jiffies = request->emitted_jiffies; @@ -1181,7 +1211,7 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv, if (IS_VALLEYVIEW(dev)) { error->gtier[0] = I915_READ(GTIER); error->ier = I915_READ(VLV_IER); - error->forcewake = I915_READ(FORCEWAKE_VLV); + error->forcewake = I915_READ_FW(FORCEWAKE_VLV); } if (IS_GEN7(dev)) @@ -1193,14 +1223,14 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv, } if (IS_GEN6(dev)) { - error->forcewake = I915_READ(FORCEWAKE); + error->forcewake = I915_READ_FW(FORCEWAKE); error->gab_ctl = I915_READ(GAB_CTL); error->gfx_mode = I915_READ(GFX_MODE); } /* 2: Registers which belong to multiple generations */ if (INTEL_INFO(dev)->gen >= 7) - error->forcewake = I915_READ(FORCEWAKE_MT); + error->forcewake = I915_READ_FW(FORCEWAKE_MT); if (INTEL_INFO(dev)->gen >= 6) { error->derrmr = I915_READ(DERRMR); diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h index c4cb1c0c4d0d..685c7991e24f 100644 --- a/drivers/gpu/drm/i915/i915_guc_reg.h +++ b/drivers/gpu/drm/i915/i915_guc_reg.h @@ -26,7 +26,7 @@ /* Definitions of GuC H/W registers, bits, etc */ -#define GUC_STATUS 0xc000 +#define GUC_STATUS _MMIO(0xc000) #define GS_BOOTROM_SHIFT 1 #define GS_BOOTROM_MASK (0x7F << GS_BOOTROM_SHIFT) #define GS_BOOTROM_RSA_FAILED (0x50 << GS_BOOTROM_SHIFT) @@ -39,40 +39,41 @@ #define GS_MIA_MASK (0x07 << GS_MIA_SHIFT) #define GS_MIA_CORE_STATE (1 << GS_MIA_SHIFT) -#define SOFT_SCRATCH(n) (0xc180 + ((n) * 4)) +#define SOFT_SCRATCH(n) _MMIO(0xc180 + (n) * 4) -#define UOS_RSA_SCRATCH(i) (0xc200 + (i) * 4) -#define DMA_ADDR_0_LOW 0xc300 -#define DMA_ADDR_0_HIGH 0xc304 -#define DMA_ADDR_1_LOW 0xc308 -#define DMA_ADDR_1_HIGH 0xc30c +#define UOS_RSA_SCRATCH(i) _MMIO(0xc200 + (i) * 4) +#define UOS_RSA_SCRATCH_MAX_COUNT 64 +#define DMA_ADDR_0_LOW _MMIO(0xc300) +#define DMA_ADDR_0_HIGH _MMIO(0xc304) +#define DMA_ADDR_1_LOW _MMIO(0xc308) +#define DMA_ADDR_1_HIGH _MMIO(0xc30c) #define DMA_ADDRESS_SPACE_WOPCM (7 << 16) #define DMA_ADDRESS_SPACE_GTT (8 << 16) -#define DMA_COPY_SIZE 0xc310 -#define DMA_CTRL 
0xc314 +#define DMA_COPY_SIZE _MMIO(0xc310) +#define DMA_CTRL _MMIO(0xc314) #define UOS_MOVE (1<<4) #define START_DMA (1<<0) -#define DMA_GUC_WOPCM_OFFSET 0xc340 +#define DMA_GUC_WOPCM_OFFSET _MMIO(0xc340) #define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */ -#define GUC_MAX_IDLE_COUNT 0xC3E4 +#define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4) -#define GUC_WOPCM_SIZE 0xc050 +#define GUC_WOPCM_SIZE _MMIO(0xc050) #define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */ /* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */ #define GUC_WOPCM_TOP (GUC_WOPCM_SIZE_VALUE) -#define GEN8_GT_PM_CONFIG 0x138140 -#define GEN9LP_GT_PM_CONFIG 0x138140 -#define GEN9_GT_PM_CONFIG 0x13816c +#define GEN8_GT_PM_CONFIG _MMIO(0x138140) +#define GEN9LP_GT_PM_CONFIG _MMIO(0x138140) +#define GEN9_GT_PM_CONFIG _MMIO(0x13816c) #define GT_DOORBELL_ENABLE (1<<0) -#define GEN8_GTCR 0x4274 +#define GEN8_GTCR _MMIO(0x4274) #define GEN8_GTCR_INVALIDATE (1<<0) -#define GUC_ARAT_C6DIS 0xA178 +#define GUC_ARAT_C6DIS _MMIO(0xA178) -#define GUC_SHIM_CONTROL 0xc064 +#define GUC_SHIM_CONTROL _MMIO(0xc064) #define GUC_DISABLE_SRAM_INIT_TO_ZEROES (1<<0) #define GUC_ENABLE_READ_CACHE_LOGIC (1<<1) #define GUC_ENABLE_MIA_CACHING (1<<2) @@ -89,21 +90,21 @@ GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | \ GUC_ENABLE_MIA_CLOCK_GATING) -#define HOST2GUC_INTERRUPT 0xc4c8 +#define HOST2GUC_INTERRUPT _MMIO(0xc4c8) #define HOST2GUC_TRIGGER (1<<0) #define DRBMISC1 0x1984 #define DOORBELL_ENABLE (1<<0) -#define GEN8_DRBREGL(x) (0x1000 + (x) * 8) +#define GEN8_DRBREGL(x) _MMIO(0x1000 + (x) * 8) #define GEN8_DRB_VALID (1<<0) -#define GEN8_DRBREGU(x) (GEN8_DRBREGL(x) + 4) +#define GEN8_DRBREGU(x) _MMIO(0x1000 + (x) * 8 + 4) -#define DE_GUCRMR 0x44054 +#define DE_GUCRMR _MMIO(0x44054) -#define GUC_BCS_RCS_IER 0xC550 -#define GUC_VCS2_VCS1_IER 0xC554 -#define GUC_WD_VECS_IER 0xC558 -#define GUC_PM_P24C_IER 0xC55C +#define GUC_BCS_RCS_IER _MMIO(0xC550) +#define GUC_VCS2_VCS1_IER _MMIO(0xC554) +#define GUC_WD_VECS_IER _MMIO(0xC558) +#define GUC_PM_P24C_IER _MMIO(0xC55C) #endif diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index 036b42bae827..ed9f1002ab36 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -27,7 +27,7 @@ #include "intel_guc.h" /** - * DOC: GuC Client + * DOC: GuC-based command submission * * i915_guc_client: * We use the term client to avoid confusion with contexts. 
A i915_guc_client is @@ -161,9 +161,9 @@ static int host2guc_sample_forcewake(struct intel_guc *guc, data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE; /* WaRsDisableCoarsePowerGating:skl,bxt */ if (!intel_enable_rc6(dev_priv->dev) || - (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) || - (IS_SKL_GT3(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0)) || - (IS_SKL_GT4(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0))) + IS_BXT_REVID(dev, 0, BXT_REVID_A1) || + (IS_SKL_GT3(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)) || + (IS_SKL_GT4(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0))) data[1] = 0; else /* bit 0 and 1 are for Render and Media domain separately */ @@ -258,7 +258,7 @@ static void guc_disable_doorbell(struct intel_guc *guc, struct drm_i915_private *dev_priv = guc_to_i915(guc); struct guc_doorbell_info *doorbell; void *base; - int drbreg = GEN8_DRBREGL(client->doorbell_id); + i915_reg_t drbreg = GEN8_DRBREGL(client->doorbell_id); int value; base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0)); @@ -588,8 +588,7 @@ static void lr_context_update(struct drm_i915_gem_request *rq) /** * i915_guc_submit() - Submit commands through GuC * @client: the guc client where commands will go through - * @ctx: LRC where commands come from - * @ring: HW engine that will excute the commands + * @rq: request associated with the commands * * Return: 0 if succeed */ @@ -731,7 +730,8 @@ static void guc_client_free(struct drm_device *dev, * The kernel client to replace ExecList submission is created with * NORMAL priority. Priority of a client for scheduler can be HIGH, * while a preemption context can use CRITICAL. - * @ctx the context to own the client (we use the default render context) + * @ctx: the context that owns the client (we use the default render + * context) * * Return: An i915_guc_client object if success. */ diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 0d228f909dcb..c8ba94968aaf 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -139,7 +139,8 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = { /* * We should clear IMR at preinstall/uninstall, and just check at postinstall. */ -static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg) +static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, + i915_reg_t reg) { u32 val = I915_READ(reg); @@ -147,7 +148,7 @@ static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg) return; WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", - reg, val); + i915_mmio_reg_offset(reg), val); I915_WRITE(reg, 0xffffffff); POSTING_READ(reg); I915_WRITE(reg, 0xffffffff); @@ -283,17 +284,17 @@ void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) ilk_update_gt_irq(dev_priv, mask, 0); } -static u32 gen6_pm_iir(struct drm_i915_private *dev_priv) +static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv) { return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; } -static u32 gen6_pm_imr(struct drm_i915_private *dev_priv) +static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv) { return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR; } -static u32 gen6_pm_ier(struct drm_i915_private *dev_priv) +static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv) { return INTEL_INFO(dev_priv)->gen >= 8 ? 
GEN8_GT_IER(2) : GEN6_PMIER; } @@ -350,7 +351,7 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) void gen6_reset_rps_interrupts(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t reg = gen6_pm_iir(dev_priv); + i915_reg_t reg = gen6_pm_iir(dev_priv); spin_lock_irq(&dev_priv->irq_lock); I915_WRITE(reg, dev_priv->pm_rps_events); @@ -477,7 +478,7 @@ static void __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, u32 enable_mask, u32 status_mask) { - u32 reg = PIPESTAT(pipe); + i915_reg_t reg = PIPESTAT(pipe); u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; assert_spin_locked(&dev_priv->irq_lock); @@ -504,7 +505,7 @@ static void __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, u32 enable_mask, u32 status_mask) { - u32 reg = PIPESTAT(pipe); + i915_reg_t reg = PIPESTAT(pipe); u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; assert_spin_locked(&dev_priv->irq_lock); @@ -665,8 +666,7 @@ static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe) static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) { struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long high_frame; - unsigned long low_frame; + i915_reg_t high_frame, low_frame; u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; struct intel_crtc *intel_crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); @@ -717,9 +717,7 @@ static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); } -/* raw reads, only for fast reads of display block, no need for forcewake etc. */ -#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__)) - +/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; @@ -733,9 +731,9 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) vtotal /= 2; if (IS_GEN2(dev)) - position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; + position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; else - position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; + position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; /* * On HSW, the DSL reg (0x70000) appears to return 0 if we @@ -827,7 +825,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, * We can split this into vertical and horizontal * scanout position. 
*/ - position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; + position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; /* convert to pixel counts */ vbl_start *= htotal; @@ -1188,7 +1186,7 @@ static void ivybridge_parity_work(struct work_struct *work) POSTING_READ(GEN7_MISCCPCTL); while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { - u32 reg; + i915_reg_t reg; slice--; if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev))) @@ -1196,7 +1194,7 @@ static void ivybridge_parity_work(struct work_struct *work) dev_priv->l3_parity.which_slice &= ~(1<<slice); - reg = GEN7_L3CDERRST1 + (slice * 0x200); + reg = GEN7_L3CDERRST1(slice); error_status = I915_READ(reg); row = GEN7_PARITY_ERROR_ROW(error_status); @@ -1290,70 +1288,69 @@ static void snb_gt_irq_handler(struct drm_device *dev, ivybridge_parity_error_irq_handler(dev, gt_iir); } +static __always_inline void +gen8_cs_irq_handler(struct intel_engine_cs *ring, u32 iir, int test_shift) +{ + if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) + notify_ring(ring); + if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) + intel_lrc_irq_handler(ring); +} + static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) { irqreturn_t ret = IRQ_NONE; if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { - u32 tmp = I915_READ_FW(GEN8_GT_IIR(0)); - if (tmp) { - I915_WRITE_FW(GEN8_GT_IIR(0), tmp); + u32 iir = I915_READ_FW(GEN8_GT_IIR(0)); + if (iir) { + I915_WRITE_FW(GEN8_GT_IIR(0), iir); ret = IRQ_HANDLED; - if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT)) - intel_lrc_irq_handler(&dev_priv->ring[RCS]); - if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT)) - notify_ring(&dev_priv->ring[RCS]); + gen8_cs_irq_handler(&dev_priv->ring[RCS], + iir, GEN8_RCS_IRQ_SHIFT); - if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT)) - intel_lrc_irq_handler(&dev_priv->ring[BCS]); - if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT)) - notify_ring(&dev_priv->ring[BCS]); + gen8_cs_irq_handler(&dev_priv->ring[BCS], + iir, GEN8_BCS_IRQ_SHIFT); } else DRM_ERROR("The master control interrupt lied (GT0)!\n"); } if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { - u32 tmp = I915_READ_FW(GEN8_GT_IIR(1)); - if (tmp) { - I915_WRITE_FW(GEN8_GT_IIR(1), tmp); + u32 iir = I915_READ_FW(GEN8_GT_IIR(1)); + if (iir) { + I915_WRITE_FW(GEN8_GT_IIR(1), iir); ret = IRQ_HANDLED; - if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT)) - intel_lrc_irq_handler(&dev_priv->ring[VCS]); - if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT)) - notify_ring(&dev_priv->ring[VCS]); + gen8_cs_irq_handler(&dev_priv->ring[VCS], + iir, GEN8_VCS1_IRQ_SHIFT); - if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT)) - intel_lrc_irq_handler(&dev_priv->ring[VCS2]); - if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT)) - notify_ring(&dev_priv->ring[VCS2]); + gen8_cs_irq_handler(&dev_priv->ring[VCS2], + iir, GEN8_VCS2_IRQ_SHIFT); } else DRM_ERROR("The master control interrupt lied (GT1)!\n"); } if (master_ctl & GEN8_GT_VECS_IRQ) { - u32 tmp = I915_READ_FW(GEN8_GT_IIR(3)); - if (tmp) { - I915_WRITE_FW(GEN8_GT_IIR(3), tmp); + u32 iir = I915_READ_FW(GEN8_GT_IIR(3)); + if (iir) { + I915_WRITE_FW(GEN8_GT_IIR(3), iir); ret = IRQ_HANDLED; - if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)) - intel_lrc_irq_handler(&dev_priv->ring[VECS]); - if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT)) - 
notify_ring(&dev_priv->ring[VECS]); + gen8_cs_irq_handler(&dev_priv->ring[VECS], + iir, GEN8_VECS_IRQ_SHIFT); } else DRM_ERROR("The master control interrupt lied (GT3)!\n"); } if (master_ctl & GEN8_GT_PM_IRQ) { - u32 tmp = I915_READ_FW(GEN8_GT_IIR(2)); - if (tmp & dev_priv->pm_rps_events) { + u32 iir = I915_READ_FW(GEN8_GT_IIR(2)); + if (iir & dev_priv->pm_rps_events) { I915_WRITE_FW(GEN8_GT_IIR(2), - tmp & dev_priv->pm_rps_events); + iir & dev_priv->pm_rps_events); ret = IRQ_HANDLED; - gen6_rps_irq_handler(dev_priv, tmp); + gen6_rps_irq_handler(dev_priv, iir); } else DRM_ERROR("The master control interrupt lied (PM)!\n"); } @@ -1625,7 +1622,7 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir) spin_lock(&dev_priv->irq_lock); for_each_pipe(dev_priv, pipe) { - int reg; + i915_reg_t reg; u32 mask, iir_bit = 0; /* @@ -2354,9 +2351,13 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) spt_irq_handler(dev, pch_iir); else cpt_irq_handler(dev, pch_iir); - } else - DRM_ERROR("The master control interrupt lied (SDE)!\n"); - + } else { + /* + * Like on previous PCH there seems to be something + * fishy going on with forwarding PCH interrupts. + */ + DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); + } } I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); @@ -3869,7 +3870,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); for_each_pipe(dev_priv, pipe) { - int reg = PIPESTAT(pipe); + i915_reg_t reg = PIPESTAT(pipe); pipe_stats[pipe] = I915_READ(reg); /* @@ -4050,7 +4051,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); for_each_pipe(dev_priv, pipe) { - int reg = PIPESTAT(pipe); + i915_reg_t reg = PIPESTAT(pipe); pipe_stats[pipe] = I915_READ(reg); /* Clear the PIPE*STAT regs before the IIR */ @@ -4272,7 +4273,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); for_each_pipe(dev_priv, pipe) { - int reg = PIPESTAT(pipe); + i915_reg_t reg = PIPESTAT(pipe); pipe_stats[pipe] = I915_READ(reg); /* diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 4be13a5eb932..835d6099c769 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -32,6 +32,7 @@ struct i915_params i915 __read_mostly = { .panel_use_ssc = -1, .vbt_sdvo_panel_type = -1, .enable_rc6 = -1, + .enable_dc = -1, .enable_fbc = -1, .enable_execlists = -1, .enable_hangcheck = true, @@ -80,6 +81,11 @@ MODULE_PARM_DESC(enable_rc6, "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. " "default: -1 (use per-chip default)"); +module_param_named_unsafe(enable_dc, i915.enable_dc, int, 0400); +MODULE_PARM_DESC(enable_dc, + "Enable power-saving display C-states. " + "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)"); + module_param_named_unsafe(enable_fbc, i915.enable_fbc, int, 0600); MODULE_PARM_DESC(enable_fbc, "Enable frame buffer compression for power savings " @@ -112,7 +118,7 @@ MODULE_PARM_DESC(enable_hangcheck, module_param_named_unsafe(enable_ppgtt, i915.enable_ppgtt, int, 0400); MODULE_PARM_DESC(enable_ppgtt, "Override PPGTT usage. 
" - "(-1=auto [default], 0=disabled, 1=aliasing, 2=full)"); + "(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)"); module_param_named_unsafe(enable_execlists, i915.enable_execlists, int, 0400); MODULE_PARM_DESC(enable_execlists, @@ -126,7 +132,7 @@ module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, i MODULE_PARM_DESC(preliminary_hw_support, "Enable preliminary hardware support."); -module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0600); +module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0400); MODULE_PARM_DESC(disable_power_well, "Disable display power wells when possible " "(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)"); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index bc7b8faba84d..1a12d44b9710 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -25,14 +25,43 @@ #ifndef _I915_REG_H_ #define _I915_REG_H_ +typedef struct { + uint32_t reg; +} i915_reg_t; + +#define _MMIO(r) ((const i915_reg_t){ .reg = (r) }) + +#define INVALID_MMIO_REG _MMIO(0) + +static inline uint32_t i915_mmio_reg_offset(i915_reg_t reg) +{ + return reg.reg; +} + +static inline bool i915_mmio_reg_equal(i915_reg_t a, i915_reg_t b) +{ + return i915_mmio_reg_offset(a) == i915_mmio_reg_offset(b); +} + +static inline bool i915_mmio_reg_valid(i915_reg_t reg) +{ + return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG); +} + #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) +#define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b)) #define _PLANE(plane, a, b) _PIPE(plane, a, b) -#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a))) +#define _MMIO_PLANE(plane, a, b) _MMIO_PIPE(plane, a, b) +#define _TRANS(tran, a, b) ((a) + (tran)*((b)-(a))) +#define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b)) #define _PORT(port, a, b) ((a) + (port)*((b)-(a))) +#define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b)) #define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \ (pipe) == PIPE_B ? (b) : (c)) +#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PIPE3(pipe, a, b, c)) #define _PORT3(port, a, b, c) ((port) == PORT_A ? (a) : \ (port) == PORT_B ? 
(b) : (c)) +#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PORT3(pipe, a, b, c)) #define _MASKED_FIELD(mask, value) ({ \ if (__builtin_constant_p(mask)) \ @@ -105,14 +134,14 @@ #define GRDOM_RESET_STATUS (1<<1) #define GRDOM_RESET_ENABLE (1<<0) -#define ILK_GDSR (MCHBAR_MIRROR_BASE + 0x2ca4) +#define ILK_GDSR _MMIO(MCHBAR_MIRROR_BASE + 0x2ca4) #define ILK_GRDOM_FULL (0<<1) #define ILK_GRDOM_RENDER (1<<1) #define ILK_GRDOM_MEDIA (3<<1) #define ILK_GRDOM_MASK (3<<1) #define ILK_GRDOM_RESET_ENABLE (1<<0) -#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */ +#define GEN6_MBCUNIT_SNPCR _MMIO(0x900c) /* for LLC config */ #define GEN6_MBC_SNPCR_SHIFT 21 #define GEN6_MBC_SNPCR_MASK (3<<21) #define GEN6_MBC_SNPCR_MAX (0<<21) @@ -120,31 +149,31 @@ #define GEN6_MBC_SNPCR_LOW (2<<21) #define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */ -#define VLV_G3DCTL 0x9024 -#define VLV_GSCKGCTL 0x9028 +#define VLV_G3DCTL _MMIO(0x9024) +#define VLV_GSCKGCTL _MMIO(0x9028) -#define GEN6_MBCTL 0x0907c +#define GEN6_MBCTL _MMIO(0x0907c) #define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4) #define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3) #define GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2) #define GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1) #define GEN6_MBCTL_BOOT_FETCH_MECH (1 << 0) -#define GEN6_GDRST 0x941c +#define GEN6_GDRST _MMIO(0x941c) #define GEN6_GRDOM_FULL (1 << 0) #define GEN6_GRDOM_RENDER (1 << 1) #define GEN6_GRDOM_MEDIA (1 << 2) #define GEN6_GRDOM_BLT (1 << 3) -#define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228) -#define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518) -#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220) +#define RING_PP_DIR_BASE(ring) _MMIO((ring)->mmio_base+0x228) +#define RING_PP_DIR_BASE_READ(ring) _MMIO((ring)->mmio_base+0x518) +#define RING_PP_DIR_DCLV(ring) _MMIO((ring)->mmio_base+0x220) #define PP_DIR_DCLV_2G 0xffffffff -#define GEN8_RING_PDP_UDW(ring, n) ((ring)->mmio_base+0x270 + ((n) * 8 + 4)) -#define GEN8_RING_PDP_LDW(ring, n) ((ring)->mmio_base+0x270 + (n) * 8) +#define GEN8_RING_PDP_UDW(ring, n) _MMIO((ring)->mmio_base+0x270 + (n) * 8 + 4) +#define GEN8_RING_PDP_LDW(ring, n) _MMIO((ring)->mmio_base+0x270 + (n) * 8) -#define GEN8_R_PWR_CLK_STATE 0x20C8 +#define GEN8_R_PWR_CLK_STATE _MMIO(0x20C8) #define GEN8_RPCS_ENABLE (1 << 31) #define GEN8_RPCS_S_CNT_ENABLE (1 << 18) #define GEN8_RPCS_S_CNT_SHIFT 15 @@ -157,7 +186,7 @@ #define GEN8_RPCS_EU_MIN_SHIFT 0 #define GEN8_RPCS_EU_MIN_MASK (0xf << GEN8_RPCS_EU_MIN_SHIFT) -#define GAM_ECOCHK 0x4090 +#define GAM_ECOCHK _MMIO(0x4090) #define BDW_DISABLE_HDC_INVALIDATION (1<<25) #define ECOCHK_SNB_BIT (1<<10) #define ECOCHK_DIS_TLB (1<<8) @@ -170,15 +199,15 @@ #define ECOCHK_PPGTT_WT_HSW (0x2<<3) #define ECOCHK_PPGTT_WB_HSW (0x3<<3) -#define GAC_ECO_BITS 0x14090 +#define GAC_ECO_BITS _MMIO(0x14090) #define ECOBITS_SNB_BIT (1<<13) #define ECOBITS_PPGTT_CACHE64B (3<<8) #define ECOBITS_PPGTT_CACHE4B (0<<8) -#define GAB_CTL 0x24000 +#define GAB_CTL _MMIO(0x24000) #define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8) -#define GEN6_STOLEN_RESERVED 0x1082C0 +#define GEN6_STOLEN_RESERVED _MMIO(0x1082C0) #define GEN6_STOLEN_RESERVED_ADDR_MASK (0xFFF << 20) #define GEN7_STOLEN_RESERVED_ADDR_MASK (0x3FFF << 18) #define GEN6_STOLEN_RESERVED_SIZE_MASK (3 << 4) @@ -200,6 +229,7 @@ #define VGA_ST01_MDA 0x3ba #define VGA_ST01_CGA 0x3da +#define _VGA_MSR_WRITE _MMIO(0x3c2) #define VGA_MSR_WRITE 0x3c2 #define VGA_MSR_READ 0x3cc #define VGA_MSR_MEM_EN (1<<1) @@ -377,10 +407,12 @@ #define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1) #define 
MI_BATCH_RESOURCE_STREAMER (1<<10) -#define MI_PREDICATE_SRC0 (0x2400) -#define MI_PREDICATE_SRC1 (0x2408) +#define MI_PREDICATE_SRC0 _MMIO(0x2400) +#define MI_PREDICATE_SRC0_UDW _MMIO(0x2400 + 4) +#define MI_PREDICATE_SRC1 _MMIO(0x2408) +#define MI_PREDICATE_SRC1_UDW _MMIO(0x2408 + 4) -#define MI_PREDICATE_RESULT_2 (0x2214) +#define MI_PREDICATE_RESULT_2 _MMIO(0x2214) #define LOWER_SLICE_ENABLED (1<<0) #define LOWER_SLICE_DISABLED (0<<0) @@ -509,49 +541,61 @@ /* * Registers used only by the command parser */ -#define BCS_SWCTRL 0x22200 - -#define GPGPU_THREADS_DISPATCHED 0x2290 -#define HS_INVOCATION_COUNT 0x2300 -#define DS_INVOCATION_COUNT 0x2308 -#define IA_VERTICES_COUNT 0x2310 -#define IA_PRIMITIVES_COUNT 0x2318 -#define VS_INVOCATION_COUNT 0x2320 -#define GS_INVOCATION_COUNT 0x2328 -#define GS_PRIMITIVES_COUNT 0x2330 -#define CL_INVOCATION_COUNT 0x2338 -#define CL_PRIMITIVES_COUNT 0x2340 -#define PS_INVOCATION_COUNT 0x2348 -#define PS_DEPTH_COUNT 0x2350 +#define BCS_SWCTRL _MMIO(0x22200) + +#define GPGPU_THREADS_DISPATCHED _MMIO(0x2290) +#define GPGPU_THREADS_DISPATCHED_UDW _MMIO(0x2290 + 4) +#define HS_INVOCATION_COUNT _MMIO(0x2300) +#define HS_INVOCATION_COUNT_UDW _MMIO(0x2300 + 4) +#define DS_INVOCATION_COUNT _MMIO(0x2308) +#define DS_INVOCATION_COUNT_UDW _MMIO(0x2308 + 4) +#define IA_VERTICES_COUNT _MMIO(0x2310) +#define IA_VERTICES_COUNT_UDW _MMIO(0x2310 + 4) +#define IA_PRIMITIVES_COUNT _MMIO(0x2318) +#define IA_PRIMITIVES_COUNT_UDW _MMIO(0x2318 + 4) +#define VS_INVOCATION_COUNT _MMIO(0x2320) +#define VS_INVOCATION_COUNT_UDW _MMIO(0x2320 + 4) +#define GS_INVOCATION_COUNT _MMIO(0x2328) +#define GS_INVOCATION_COUNT_UDW _MMIO(0x2328 + 4) +#define GS_PRIMITIVES_COUNT _MMIO(0x2330) +#define GS_PRIMITIVES_COUNT_UDW _MMIO(0x2330 + 4) +#define CL_INVOCATION_COUNT _MMIO(0x2338) +#define CL_INVOCATION_COUNT_UDW _MMIO(0x2338 + 4) +#define CL_PRIMITIVES_COUNT _MMIO(0x2340) +#define CL_PRIMITIVES_COUNT_UDW _MMIO(0x2340 + 4) +#define PS_INVOCATION_COUNT _MMIO(0x2348) +#define PS_INVOCATION_COUNT_UDW _MMIO(0x2348 + 4) +#define PS_DEPTH_COUNT _MMIO(0x2350) +#define PS_DEPTH_COUNT_UDW _MMIO(0x2350 + 4) /* There are the 4 64-bit counter registers, one for each stream output */ -#define GEN7_SO_NUM_PRIMS_WRITTEN(n) (0x5200 + (n) * 8) +#define GEN7_SO_NUM_PRIMS_WRITTEN(n) _MMIO(0x5200 + (n) * 8) +#define GEN7_SO_NUM_PRIMS_WRITTEN_UDW(n) _MMIO(0x5200 + (n) * 8 + 4) -#define GEN7_SO_PRIM_STORAGE_NEEDED(n) (0x5240 + (n) * 8) +#define GEN7_SO_PRIM_STORAGE_NEEDED(n) _MMIO(0x5240 + (n) * 8) +#define GEN7_SO_PRIM_STORAGE_NEEDED_UDW(n) _MMIO(0x5240 + (n) * 8 + 4) -#define GEN7_3DPRIM_END_OFFSET 0x2420 -#define GEN7_3DPRIM_START_VERTEX 0x2430 -#define GEN7_3DPRIM_VERTEX_COUNT 0x2434 -#define GEN7_3DPRIM_INSTANCE_COUNT 0x2438 -#define GEN7_3DPRIM_START_INSTANCE 0x243C -#define GEN7_3DPRIM_BASE_VERTEX 0x2440 +#define GEN7_3DPRIM_END_OFFSET _MMIO(0x2420) +#define GEN7_3DPRIM_START_VERTEX _MMIO(0x2430) +#define GEN7_3DPRIM_VERTEX_COUNT _MMIO(0x2434) +#define GEN7_3DPRIM_INSTANCE_COUNT _MMIO(0x2438) +#define GEN7_3DPRIM_START_INSTANCE _MMIO(0x243C) +#define GEN7_3DPRIM_BASE_VERTEX _MMIO(0x2440) -#define GEN7_GPGPU_DISPATCHDIMX 0x2500 -#define GEN7_GPGPU_DISPATCHDIMY 0x2504 -#define GEN7_GPGPU_DISPATCHDIMZ 0x2508 +#define GEN7_GPGPU_DISPATCHDIMX _MMIO(0x2500) +#define GEN7_GPGPU_DISPATCHDIMY _MMIO(0x2504) +#define GEN7_GPGPU_DISPATCHDIMZ _MMIO(0x2508) -#define OACONTROL 0x2360 +#define OACONTROL _MMIO(0x2360) #define _GEN7_PIPEA_DE_LOAD_SL 0x70068 #define _GEN7_PIPEB_DE_LOAD_SL 0x71068 -#define 
GEN7_PIPE_DE_LOAD_SL(pipe) _PIPE(pipe, \ - _GEN7_PIPEA_DE_LOAD_SL, \ - _GEN7_PIPEB_DE_LOAD_SL) +#define GEN7_PIPE_DE_LOAD_SL(pipe) _MMIO_PIPE(pipe, _GEN7_PIPEA_DE_LOAD_SL, _GEN7_PIPEB_DE_LOAD_SL) /* * Reset registers */ -#define DEBUG_RESET_I830 0x6070 +#define DEBUG_RESET_I830 _MMIO(0x6070) #define DEBUG_RESET_FULL (1<<7) #define DEBUG_RESET_RENDER (1<<8) #define DEBUG_RESET_DISPLAY (1<<9) @@ -559,7 +603,7 @@ /* * IOSF sideband */ -#define VLV_IOSF_DOORBELL_REQ (VLV_DISPLAY_BASE + 0x2100) +#define VLV_IOSF_DOORBELL_REQ _MMIO(VLV_DISPLAY_BASE + 0x2100) #define IOSF_DEVFN_SHIFT 24 #define IOSF_OPCODE_SHIFT 16 #define IOSF_PORT_SHIFT 8 @@ -576,8 +620,8 @@ #define IOSF_PORT_CCU 0xA9 #define IOSF_PORT_GPS_CORE 0x48 #define IOSF_PORT_FLISDSI 0x1B -#define VLV_IOSF_DATA (VLV_DISPLAY_BASE + 0x2104) -#define VLV_IOSF_ADDR (VLV_DISPLAY_BASE + 0x2108) +#define VLV_IOSF_DATA _MMIO(VLV_DISPLAY_BASE + 0x2104) +#define VLV_IOSF_ADDR _MMIO(VLV_DISPLAY_BASE + 0x2108) /* See configdb bunit SB addr map */ #define BUNIT_REG_BISOC 0x11 @@ -609,6 +653,7 @@ /* See the PUNIT HAS v0.8 for the below bits */ enum punit_power_well { + /* These numbers are fixed and must match the position of the pw bits */ PUNIT_POWER_WELL_RENDER = 0, PUNIT_POWER_WELL_MEDIA = 1, PUNIT_POWER_WELL_DISP2D = 3, @@ -621,10 +666,12 @@ enum punit_power_well { PUNIT_POWER_WELL_DPIO_RX1 = 11, PUNIT_POWER_WELL_DPIO_CMN_D = 12, - PUNIT_POWER_WELL_NUM, + /* Not actual bit groups. Used as IDs for lookup_power_well() */ + PUNIT_POWER_WELL_ALWAYS_ON, }; enum skl_disp_power_wells { + /* These numbers are fixed and must match the position of the pw bits */ SKL_DISP_PW_MISC_IO, SKL_DISP_PW_DDI_A_E, SKL_DISP_PW_DDI_B, @@ -632,6 +679,10 @@ enum skl_disp_power_wells { SKL_DISP_PW_DDI_D, SKL_DISP_PW_1 = 14, SKL_DISP_PW_2, + + /* Not actual bit groups. 
Used as IDs for lookup_power_well() */ + SKL_DISP_PW_ALWAYS_ON, + SKL_DISP_PW_DC_OFF, }; #define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2)) @@ -832,7 +883,7 @@ enum skl_disp_power_wells { */ #define DPIO_DEVFN 0 -#define DPIO_CTL (VLV_DISPLAY_BASE + 0x2110) +#define DPIO_CTL _MMIO(VLV_DISPLAY_BASE + 0x2110) #define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */ #define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */ #define DPIO_SFR_BYPASS (1<<1) @@ -1185,9 +1236,9 @@ enum skl_disp_power_wells { #define DPIO_UPAR_SHIFT 30 /* BXT PHY registers */ -#define _BXT_PHY(phy, a, b) _PIPE((phy), (a), (b)) +#define _BXT_PHY(phy, a, b) _MMIO_PIPE((phy), (a), (b)) -#define BXT_P_CR_GT_DISP_PWRON 0x138090 +#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090) #define GT_DISPLAY_POWER_ON(phy) (1 << (phy)) #define _PHY_CTL_FAMILY_EDP 0x64C80 @@ -1203,7 +1254,7 @@ enum skl_disp_power_wells { #define PORT_PLL_ENABLE (1 << 31) #define PORT_PLL_LOCK (1 << 30) #define PORT_PLL_REF_SEL (1 << 27) -#define BXT_PORT_PLL_ENABLE(port) _PORT(port, _PORT_PLL_A, _PORT_PLL_B) +#define BXT_PORT_PLL_ENABLE(port) _MMIO_PORT(port, _PORT_PLL_A, _PORT_PLL_B) #define _PORT_PLL_EBB_0_A 0x162034 #define _PORT_PLL_EBB_0_B 0x6C034 @@ -1214,7 +1265,7 @@ enum skl_disp_power_wells { #define PORT_PLL_P2_SHIFT 8 #define PORT_PLL_P2_MASK (0x1f << PORT_PLL_P2_SHIFT) #define PORT_PLL_P2(x) ((x) << PORT_PLL_P2_SHIFT) -#define BXT_PORT_PLL_EBB_0(port) _PORT3(port, _PORT_PLL_EBB_0_A, \ +#define BXT_PORT_PLL_EBB_0(port) _MMIO_PORT3(port, _PORT_PLL_EBB_0_A, \ _PORT_PLL_EBB_0_B, \ _PORT_PLL_EBB_0_C) @@ -1223,7 +1274,7 @@ enum skl_disp_power_wells { #define _PORT_PLL_EBB_4_C 0x6C344 #define PORT_PLL_10BIT_CLK_ENABLE (1 << 13) #define PORT_PLL_RECALIBRATE (1 << 14) -#define BXT_PORT_PLL_EBB_4(port) _PORT3(port, _PORT_PLL_EBB_4_A, \ +#define BXT_PORT_PLL_EBB_4(port) _MMIO_PORT3(port, _PORT_PLL_EBB_4_A, \ _PORT_PLL_EBB_4_B, \ _PORT_PLL_EBB_4_C) @@ -1259,7 +1310,7 @@ enum skl_disp_power_wells { #define _PORT_PLL_BASE(port) _PORT3(port, _PORT_PLL_0_A, \ _PORT_PLL_0_B, \ _PORT_PLL_0_C) -#define BXT_PORT_PLL(port, idx) (_PORT_PLL_BASE(port) + (idx) * 4) +#define BXT_PORT_PLL(port, idx) _MMIO(_PORT_PLL_BASE(port) + (idx) * 4) /* BXT PHY common lane registers */ #define _PORT_CL1CM_DW0_A 0x162000 @@ -1297,7 +1348,7 @@ enum skl_disp_power_wells { _PORT_CL1CM_DW30_A) /* Defined for PHY0 only */ -#define BXT_PORT_CL2CM_DW6_BC 0x6C358 +#define BXT_PORT_CL2CM_DW6_BC _MMIO(0x6C358) #define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28) /* BXT PHY Ref registers */ @@ -1337,10 +1388,10 @@ enum skl_disp_power_wells { #define _PORT_PCS_DW10_GRP_A 0x162C28 #define _PORT_PCS_DW10_GRP_B 0x6CC28 #define _PORT_PCS_DW10_GRP_C 0x6CE28 -#define BXT_PORT_PCS_DW10_LN01(port) _PORT3(port, _PORT_PCS_DW10_LN01_A, \ +#define BXT_PORT_PCS_DW10_LN01(port) _MMIO_PORT3(port, _PORT_PCS_DW10_LN01_A, \ _PORT_PCS_DW10_LN01_B, \ _PORT_PCS_DW10_LN01_C) -#define BXT_PORT_PCS_DW10_GRP(port) _PORT3(port, _PORT_PCS_DW10_GRP_A, \ +#define BXT_PORT_PCS_DW10_GRP(port) _MMIO_PORT3(port, _PORT_PCS_DW10_GRP_A, \ _PORT_PCS_DW10_GRP_B, \ _PORT_PCS_DW10_GRP_C) #define TX2_SWING_CALC_INIT (1 << 31) @@ -1357,13 +1408,13 @@ enum skl_disp_power_wells { #define _PORT_PCS_DW12_GRP_C 0x6CE30 #define LANESTAGGER_STRAP_OVRD (1 << 6) #define LANE_STAGGER_MASK 0x1F -#define BXT_PORT_PCS_DW12_LN01(port) _PORT3(port, _PORT_PCS_DW12_LN01_A, \ +#define BXT_PORT_PCS_DW12_LN01(port) _MMIO_PORT3(port, _PORT_PCS_DW12_LN01_A, \ _PORT_PCS_DW12_LN01_B, \ _PORT_PCS_DW12_LN01_C) -#define BXT_PORT_PCS_DW12_LN23(port) _PORT3(port, 
_PORT_PCS_DW12_LN23_A, \ +#define BXT_PORT_PCS_DW12_LN23(port) _MMIO_PORT3(port, _PORT_PCS_DW12_LN23_A, \ _PORT_PCS_DW12_LN23_B, \ _PORT_PCS_DW12_LN23_C) -#define BXT_PORT_PCS_DW12_GRP(port) _PORT3(port, _PORT_PCS_DW12_GRP_A, \ +#define BXT_PORT_PCS_DW12_GRP(port) _MMIO_PORT3(port, _PORT_PCS_DW12_GRP_A, \ _PORT_PCS_DW12_GRP_B, \ _PORT_PCS_DW12_GRP_C) @@ -1377,10 +1428,10 @@ enum skl_disp_power_wells { #define _PORT_TX_DW2_GRP_A 0x162D08 #define _PORT_TX_DW2_GRP_B 0x6CD08 #define _PORT_TX_DW2_GRP_C 0x6CF08 -#define BXT_PORT_TX_DW2_GRP(port) _PORT3(port, _PORT_TX_DW2_GRP_A, \ +#define BXT_PORT_TX_DW2_GRP(port) _MMIO_PORT3(port, _PORT_TX_DW2_GRP_A, \ _PORT_TX_DW2_GRP_B, \ _PORT_TX_DW2_GRP_C) -#define BXT_PORT_TX_DW2_LN0(port) _PORT3(port, _PORT_TX_DW2_LN0_A, \ +#define BXT_PORT_TX_DW2_LN0(port) _MMIO_PORT3(port, _PORT_TX_DW2_LN0_A, \ _PORT_TX_DW2_LN0_B, \ _PORT_TX_DW2_LN0_C) #define MARGIN_000_SHIFT 16 @@ -1394,10 +1445,10 @@ enum skl_disp_power_wells { #define _PORT_TX_DW3_GRP_A 0x162D0C #define _PORT_TX_DW3_GRP_B 0x6CD0C #define _PORT_TX_DW3_GRP_C 0x6CF0C -#define BXT_PORT_TX_DW3_GRP(port) _PORT3(port, _PORT_TX_DW3_GRP_A, \ +#define BXT_PORT_TX_DW3_GRP(port) _MMIO_PORT3(port, _PORT_TX_DW3_GRP_A, \ _PORT_TX_DW3_GRP_B, \ _PORT_TX_DW3_GRP_C) -#define BXT_PORT_TX_DW3_LN0(port) _PORT3(port, _PORT_TX_DW3_LN0_A, \ +#define BXT_PORT_TX_DW3_LN0(port) _MMIO_PORT3(port, _PORT_TX_DW3_LN0_A, \ _PORT_TX_DW3_LN0_B, \ _PORT_TX_DW3_LN0_C) #define SCALE_DCOMP_METHOD (1 << 26) @@ -1409,10 +1460,10 @@ enum skl_disp_power_wells { #define _PORT_TX_DW4_GRP_A 0x162D10 #define _PORT_TX_DW4_GRP_B 0x6CD10 #define _PORT_TX_DW4_GRP_C 0x6CF10 -#define BXT_PORT_TX_DW4_LN0(port) _PORT3(port, _PORT_TX_DW4_LN0_A, \ +#define BXT_PORT_TX_DW4_LN0(port) _MMIO_PORT3(port, _PORT_TX_DW4_LN0_A, \ _PORT_TX_DW4_LN0_B, \ _PORT_TX_DW4_LN0_C) -#define BXT_PORT_TX_DW4_GRP(port) _PORT3(port, _PORT_TX_DW4_GRP_A, \ +#define BXT_PORT_TX_DW4_GRP(port) _MMIO_PORT3(port, _PORT_TX_DW4_GRP_A, \ _PORT_TX_DW4_GRP_B, \ _PORT_TX_DW4_GRP_C) #define DEEMPH_SHIFT 24 @@ -1423,17 +1474,17 @@ enum skl_disp_power_wells { #define _PORT_TX_DW14_LN0_C 0x6C938 #define LATENCY_OPTIM_SHIFT 30 #define LATENCY_OPTIM (1 << LATENCY_OPTIM_SHIFT) -#define BXT_PORT_TX_DW14_LN(port, lane) (_PORT3((port), _PORT_TX_DW14_LN0_A, \ +#define BXT_PORT_TX_DW14_LN(port, lane) _MMIO(_PORT3((port), _PORT_TX_DW14_LN0_A, \ _PORT_TX_DW14_LN0_B, \ _PORT_TX_DW14_LN0_C) + \ _BXT_LANE_OFFSET(lane)) /* UAIMI scratch pad register 1 */ -#define UAIMI_SPR1 0x4F074 +#define UAIMI_SPR1 _MMIO(0x4F074) /* SKL VccIO mask */ #define SKL_VCCIO_MASK 0x1 /* SKL balance leg register */ -#define DISPIO_CR_TX_BMU_CR0 0x6C00C +#define DISPIO_CR_TX_BMU_CR0 _MMIO(0x6C00C) /* I_boost values */ #define BALANCE_LEG_SHIFT(port) (8+3*(port)) #define BALANCE_LEG_MASK(port) (7<<(8+3*(port))) @@ -1450,7 +1501,7 @@ enum skl_disp_power_wells { * [0-15] @ 0x100000 gen6,vlv,chv * [0-31] @ 0x100000 gen7+ */ -#define FENCE_REG(i) (0x2000 + (((i) & 8) << 9) + ((i) & 7) * 4) +#define FENCE_REG(i) _MMIO(0x2000 + (((i) & 8) << 9) + ((i) & 7) * 4) #define I830_FENCE_START_MASK 0x07f80000 #define I830_FENCE_TILING_Y_SHIFT 12 #define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) @@ -1463,21 +1514,21 @@ enum skl_disp_power_wells { #define I915_FENCE_START_MASK 0x0ff00000 #define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8) -#define FENCE_REG_965_LO(i) (0x03000 + (i) * 8) -#define FENCE_REG_965_HI(i) (0x03000 + (i) * 8 + 4) +#define FENCE_REG_965_LO(i) _MMIO(0x03000 + (i) * 8) +#define FENCE_REG_965_HI(i) 
_MMIO(0x03000 + (i) * 8 + 4) #define I965_FENCE_PITCH_SHIFT 2 #define I965_FENCE_TILING_Y_SHIFT 1 #define I965_FENCE_REG_VALID (1<<0) #define I965_FENCE_MAX_PITCH_VAL 0x0400 -#define FENCE_REG_GEN6_LO(i) (0x100000 + (i) * 8) -#define FENCE_REG_GEN6_HI(i) (0x100000 + (i) * 8 + 4) +#define FENCE_REG_GEN6_LO(i) _MMIO(0x100000 + (i) * 8) +#define FENCE_REG_GEN6_HI(i) _MMIO(0x100000 + (i) * 8 + 4) #define GEN6_FENCE_PITCH_SHIFT 32 #define GEN7_FENCE_MAX_PITCH_VAL 0x0800 /* control register for cpu gtt access */ -#define TILECTL 0x101000 +#define TILECTL _MMIO(0x101000) #define TILECTL_SWZCTL (1 << 0) #define TILECTL_TLBPF (1 << 1) #define TILECTL_TLB_PREFETCH_DIS (1 << 2) @@ -1486,30 +1537,30 @@ enum skl_disp_power_wells { /* * Instruction and interrupt control regs */ -#define PGTBL_CTL 0x02020 +#define PGTBL_CTL _MMIO(0x02020) #define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */ #define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */ -#define PGTBL_ER 0x02024 -#define PRB0_BASE (0x2030-0x30) -#define PRB1_BASE (0x2040-0x30) /* 830,gen3 */ -#define PRB2_BASE (0x2050-0x30) /* gen3 */ -#define SRB0_BASE (0x2100-0x30) /* gen2 */ -#define SRB1_BASE (0x2110-0x30) /* gen2 */ -#define SRB2_BASE (0x2120-0x30) /* 830 */ -#define SRB3_BASE (0x2130-0x30) /* 830 */ +#define PGTBL_ER _MMIO(0x02024) +#define PRB0_BASE (0x2030-0x30) +#define PRB1_BASE (0x2040-0x30) /* 830,gen3 */ +#define PRB2_BASE (0x2050-0x30) /* gen3 */ +#define SRB0_BASE (0x2100-0x30) /* gen2 */ +#define SRB1_BASE (0x2110-0x30) /* gen2 */ +#define SRB2_BASE (0x2120-0x30) /* 830 */ +#define SRB3_BASE (0x2130-0x30) /* 830 */ #define RENDER_RING_BASE 0x02000 #define BSD_RING_BASE 0x04000 #define GEN6_BSD_RING_BASE 0x12000 #define GEN8_BSD2_RING_BASE 0x1c000 #define VEBOX_RING_BASE 0x1a000 #define BLT_RING_BASE 0x22000 -#define RING_TAIL(base) ((base)+0x30) -#define RING_HEAD(base) ((base)+0x34) -#define RING_START(base) ((base)+0x38) -#define RING_CTL(base) ((base)+0x3c) -#define RING_SYNC_0(base) ((base)+0x40) -#define RING_SYNC_1(base) ((base)+0x44) -#define RING_SYNC_2(base) ((base)+0x48) +#define RING_TAIL(base) _MMIO((base)+0x30) +#define RING_HEAD(base) _MMIO((base)+0x34) +#define RING_START(base) _MMIO((base)+0x38) +#define RING_CTL(base) _MMIO((base)+0x3c) +#define RING_SYNC_0(base) _MMIO((base)+0x40) +#define RING_SYNC_1(base) _MMIO((base)+0x44) +#define RING_SYNC_2(base) _MMIO((base)+0x48) #define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE)) #define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE)) #define GEN6_RVESYNC (RING_SYNC_2(RENDER_RING_BASE)) @@ -1522,51 +1573,52 @@ enum skl_disp_power_wells { #define GEN6_VEBSYNC (RING_SYNC_0(VEBOX_RING_BASE)) #define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE)) #define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE)) -#define GEN6_NOSYNC 0 -#define RING_PSMI_CTL(base) ((base)+0x50) -#define RING_MAX_IDLE(base) ((base)+0x54) -#define RING_HWS_PGA(base) ((base)+0x80) -#define RING_HWS_PGA_GEN6(base) ((base)+0x2080) -#define RING_RESET_CTL(base) ((base)+0xd0) +#define GEN6_NOSYNC INVALID_MMIO_REG +#define RING_PSMI_CTL(base) _MMIO((base)+0x50) +#define RING_MAX_IDLE(base) _MMIO((base)+0x54) +#define RING_HWS_PGA(base) _MMIO((base)+0x80) +#define RING_HWS_PGA_GEN6(base) _MMIO((base)+0x2080) +#define RING_RESET_CTL(base) _MMIO((base)+0xd0) #define RESET_CTL_REQUEST_RESET (1 << 0) #define RESET_CTL_READY_TO_RESET (1 << 1) -#define HSW_GTT_CACHE_EN 0x4024 +#define HSW_GTT_CACHE_EN _MMIO(0x4024) #define GTT_CACHE_EN_ALL 0xF0007FFF -#define GEN7_WR_WATERMARK 0x4028 -#define 
GEN7_GFX_PRIO_CTRL 0x402C -#define ARB_MODE 0x4030 +#define GEN7_WR_WATERMARK _MMIO(0x4028) +#define GEN7_GFX_PRIO_CTRL _MMIO(0x402C) +#define ARB_MODE _MMIO(0x4030) #define ARB_MODE_SWIZZLE_SNB (1<<4) #define ARB_MODE_SWIZZLE_IVB (1<<5) -#define GEN7_GFX_PEND_TLB0 0x4034 -#define GEN7_GFX_PEND_TLB1 0x4038 +#define GEN7_GFX_PEND_TLB0 _MMIO(0x4034) +#define GEN7_GFX_PEND_TLB1 _MMIO(0x4038) /* L3, CVS, ZTLB, RCC, CASC LRA min, max values */ -#define GEN7_LRA_LIMITS(i) (0x403C + (i) * 4) +#define GEN7_LRA_LIMITS(i) _MMIO(0x403C + (i) * 4) #define GEN7_LRA_LIMITS_REG_NUM 13 -#define GEN7_MEDIA_MAX_REQ_COUNT 0x4070 -#define GEN7_GFX_MAX_REQ_COUNT 0x4074 +#define GEN7_MEDIA_MAX_REQ_COUNT _MMIO(0x4070) +#define GEN7_GFX_MAX_REQ_COUNT _MMIO(0x4074) -#define GAMTARBMODE 0x04a08 +#define GAMTARBMODE _MMIO(0x04a08) #define ARB_MODE_BWGTLB_DISABLE (1<<9) #define ARB_MODE_SWIZZLE_BDW (1<<1) -#define RENDER_HWS_PGA_GEN7 (0x04080) -#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id) +#define RENDER_HWS_PGA_GEN7 _MMIO(0x04080) +#define RING_FAULT_REG(ring) _MMIO(0x4094 + 0x100*(ring)->id) #define RING_FAULT_GTTSEL_MASK (1<<11) #define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff) #define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3) #define RING_FAULT_VALID (1<<0) -#define DONE_REG 0x40b0 -#define GEN8_PRIVATE_PAT_LO 0x40e0 -#define GEN8_PRIVATE_PAT_HI (0x40e0 + 4) -#define BSD_HWS_PGA_GEN7 (0x04180) -#define BLT_HWS_PGA_GEN7 (0x04280) -#define VEBOX_HWS_PGA_GEN7 (0x04380) -#define RING_ACTHD(base) ((base)+0x74) -#define RING_ACTHD_UDW(base) ((base)+0x5c) -#define RING_NOPID(base) ((base)+0x94) -#define RING_IMR(base) ((base)+0xa8) -#define RING_HWSTAM(base) ((base)+0x98) -#define RING_TIMESTAMP(base) ((base)+0x358) +#define DONE_REG _MMIO(0x40b0) +#define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0) +#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4) +#define BSD_HWS_PGA_GEN7 _MMIO(0x04180) +#define BLT_HWS_PGA_GEN7 _MMIO(0x04280) +#define VEBOX_HWS_PGA_GEN7 _MMIO(0x04380) +#define RING_ACTHD(base) _MMIO((base)+0x74) +#define RING_ACTHD_UDW(base) _MMIO((base)+0x5c) +#define RING_NOPID(base) _MMIO((base)+0x94) +#define RING_IMR(base) _MMIO((base)+0xa8) +#define RING_HWSTAM(base) _MMIO((base)+0x98) +#define RING_TIMESTAMP(base) _MMIO((base)+0x358) +#define RING_TIMESTAMP_UDW(base) _MMIO((base)+0x358 + 4) #define TAIL_ADDR 0x001FFFF8 #define HEAD_WRAP_COUNT 0xFFE00000 #define HEAD_WRAP_ONE 0x00200000 @@ -1583,57 +1635,65 @@ enum skl_disp_power_wells { #define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */ #define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */ -#define GEN7_TLB_RD_ADDR 0x4700 +#define GEN7_TLB_RD_ADDR _MMIO(0x4700) #if 0 -#define PRB0_TAIL 0x02030 -#define PRB0_HEAD 0x02034 -#define PRB0_START 0x02038 -#define PRB0_CTL 0x0203c -#define PRB1_TAIL 0x02040 /* 915+ only */ -#define PRB1_HEAD 0x02044 /* 915+ only */ -#define PRB1_START 0x02048 /* 915+ only */ -#define PRB1_CTL 0x0204c /* 915+ only */ +#define PRB0_TAIL _MMIO(0x2030) +#define PRB0_HEAD _MMIO(0x2034) +#define PRB0_START _MMIO(0x2038) +#define PRB0_CTL _MMIO(0x203c) +#define PRB1_TAIL _MMIO(0x2040) /* 915+ only */ +#define PRB1_HEAD _MMIO(0x2044) /* 915+ only */ +#define PRB1_START _MMIO(0x2048) /* 915+ only */ +#define PRB1_CTL _MMIO(0x204c) /* 915+ only */ #endif -#define IPEIR_I965 0x02064 -#define IPEHR_I965 0x02068 -#define GEN7_SC_INSTDONE 0x07100 -#define GEN7_SAMPLER_INSTDONE 0x0e160 -#define GEN7_ROW_INSTDONE 0x0e164 +#define IPEIR_I965 _MMIO(0x2064) +#define IPEHR_I965 _MMIO(0x2068) +#define GEN7_SC_INSTDONE _MMIO(0x7100) +#define GEN7_SAMPLER_INSTDONE 
_MMIO(0xe160) +#define GEN7_ROW_INSTDONE _MMIO(0xe164) #define I915_NUM_INSTDONE_REG 4 -#define RING_IPEIR(base) ((base)+0x64) -#define RING_IPEHR(base) ((base)+0x68) +#define RING_IPEIR(base) _MMIO((base)+0x64) +#define RING_IPEHR(base) _MMIO((base)+0x68) /* * On GEN4, only the render ring INSTDONE exists and has a different * layout than the GEN7+ version. * The GEN2 counterpart of this register is GEN2_INSTDONE. */ -#define RING_INSTDONE(base) ((base)+0x6c) -#define RING_INSTPS(base) ((base)+0x70) -#define RING_DMA_FADD(base) ((base)+0x78) -#define RING_DMA_FADD_UDW(base) ((base)+0x60) /* gen8+ */ -#define RING_INSTPM(base) ((base)+0xc0) -#define RING_MI_MODE(base) ((base)+0x9c) -#define INSTPS 0x02070 /* 965+ only */ -#define GEN4_INSTDONE1 0x0207c /* 965+ only, aka INSTDONE_2 on SNB */ -#define ACTHD_I965 0x02074 -#define HWS_PGA 0x02080 +#define RING_INSTDONE(base) _MMIO((base)+0x6c) +#define RING_INSTPS(base) _MMIO((base)+0x70) +#define RING_DMA_FADD(base) _MMIO((base)+0x78) +#define RING_DMA_FADD_UDW(base) _MMIO((base)+0x60) /* gen8+ */ +#define RING_INSTPM(base) _MMIO((base)+0xc0) +#define RING_MI_MODE(base) _MMIO((base)+0x9c) +#define INSTPS _MMIO(0x2070) /* 965+ only */ +#define GEN4_INSTDONE1 _MMIO(0x207c) /* 965+ only, aka INSTDONE_2 on SNB */ +#define ACTHD_I965 _MMIO(0x2074) +#define HWS_PGA _MMIO(0x2080) #define HWS_ADDRESS_MASK 0xfffff000 #define HWS_START_ADDRESS_SHIFT 4 -#define PWRCTXA 0x2088 /* 965GM+ only */ +#define PWRCTXA _MMIO(0x2088) /* 965GM+ only */ #define PWRCTX_EN (1<<0) -#define IPEIR 0x02088 -#define IPEHR 0x0208c -#define GEN2_INSTDONE 0x02090 -#define NOPID 0x02094 -#define HWSTAM 0x02098 -#define DMA_FADD_I8XX 0x020d0 -#define RING_BBSTATE(base) ((base)+0x110) -#define RING_BBADDR(base) ((base)+0x140) -#define RING_BBADDR_UDW(base) ((base)+0x168) /* gen8+ */ - -#define ERROR_GEN6 0x040a0 -#define GEN7_ERR_INT 0x44040 +#define IPEIR _MMIO(0x2088) +#define IPEHR _MMIO(0x208c) +#define GEN2_INSTDONE _MMIO(0x2090) +#define NOPID _MMIO(0x2094) +#define HWSTAM _MMIO(0x2098) +#define DMA_FADD_I8XX _MMIO(0x20d0) +#define RING_BBSTATE(base) _MMIO((base)+0x110) +#define RING_BB_PPGTT (1 << 5) +#define RING_SBBADDR(base) _MMIO((base)+0x114) /* hsw+ */ +#define RING_SBBSTATE(base) _MMIO((base)+0x118) /* hsw+ */ +#define RING_SBBADDR_UDW(base) _MMIO((base)+0x11c) /* gen8+ */ +#define RING_BBADDR(base) _MMIO((base)+0x140) +#define RING_BBADDR_UDW(base) _MMIO((base)+0x168) /* gen8+ */ +#define RING_BB_PER_CTX_PTR(base) _MMIO((base)+0x1c0) /* gen8+ */ +#define RING_INDIRECT_CTX(base) _MMIO((base)+0x1c4) /* gen8+ */ +#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base)+0x1c8) /* gen8+ */ +#define RING_CTX_TIMESTAMP(base) _MMIO((base)+0x3a8) /* gen8+ */ + +#define ERROR_GEN6 _MMIO(0x40a0) +#define GEN7_ERR_INT _MMIO(0x44040) #define ERR_INT_POISON (1<<31) #define ERR_INT_MMIO_UNCLAIMED (1<<13) #define ERR_INT_PIPE_CRC_DONE_C (1<<8) @@ -1645,13 +1705,13 @@ enum skl_disp_power_wells { #define ERR_INT_FIFO_UNDERRUN_A (1<<0) #define ERR_INT_FIFO_UNDERRUN(pipe) (1<<((pipe)*3)) -#define GEN8_FAULT_TLB_DATA0 0x04b10 -#define GEN8_FAULT_TLB_DATA1 0x04b14 +#define GEN8_FAULT_TLB_DATA0 _MMIO(0x4b10) +#define GEN8_FAULT_TLB_DATA1 _MMIO(0x4b14) -#define FPGA_DBG 0x42300 +#define FPGA_DBG _MMIO(0x42300) #define FPGA_DBG_RM_NOCLAIM (1<<31) -#define DERRMR 0x44050 +#define DERRMR _MMIO(0x44050) /* Note that HBLANK events are reserved on bdw+ */ #define DERRMR_PIPEA_SCANLINE (1<<0) #define DERRMR_PIPEA_PRI_FLIP_DONE (1<<1) @@ -1675,29 +1735,29 @@ enum skl_disp_power_wells { * for various 
sorts of correct behavior. The top 16 bits of each are * the enables for writing to the corresponding low bit. */ -#define _3D_CHICKEN 0x02084 +#define _3D_CHICKEN _MMIO(0x2084) #define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10) -#define _3D_CHICKEN2 0x0208c +#define _3D_CHICKEN2 _MMIO(0x208c) /* Disables pipelining of read flushes past the SF-WIZ interface. * Required on all Ironlake steppings according to the B-Spec, but the * particular danger of not doing so is not specified. */ # define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) -#define _3D_CHICKEN3 0x02090 +#define _3D_CHICKEN3 _MMIO(0x2090) #define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10) #define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5) #define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) /* gen8+ */ #define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */ -#define MI_MODE 0x0209c +#define MI_MODE _MMIO(0x209c) # define VS_TIMER_DISPATCH (1 << 6) # define MI_FLUSH_ENABLE (1 << 12) # define ASYNC_FLIP_PERF_DISABLE (1 << 14) # define MODE_IDLE (1 << 9) # define STOP_RING (1 << 8) -#define GEN6_GT_MODE 0x20d0 -#define GEN7_GT_MODE 0x7008 +#define GEN6_GT_MODE _MMIO(0x20d0) +#define GEN7_GT_MODE _MMIO(0x7008) #define GEN6_WIZ_HASHING(hi, lo) (((hi) << 9) | ((lo) << 7)) #define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0) #define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1) @@ -1707,9 +1767,9 @@ enum skl_disp_power_wells { #define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2)) #define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2)) -#define GFX_MODE 0x02520 -#define GFX_MODE_GEN7 0x0229c -#define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c) +#define GFX_MODE _MMIO(0x2520) +#define GFX_MODE_GEN7 _MMIO(0x229c) +#define RING_MODE_GEN7(ring) _MMIO((ring)->mmio_base+0x29c) #define GFX_RUN_LIST_ENABLE (1<<15) #define GFX_INTERRUPT_STEERING (1<<14) #define GFX_TLB_INVALIDATE_EXPLICIT (1<<13) @@ -1727,36 +1787,36 @@ enum skl_disp_power_wells { #define VLV_DISPLAY_BASE 0x180000 #define VLV_MIPI_BASE VLV_DISPLAY_BASE -#define VLV_GU_CTL0 (VLV_DISPLAY_BASE + 0x2030) -#define VLV_GU_CTL1 (VLV_DISPLAY_BASE + 0x2034) -#define SCPD0 0x0209c /* 915+ only */ -#define IER 0x020a0 -#define IIR 0x020a4 -#define IMR 0x020a8 -#define ISR 0x020ac -#define VLV_GUNIT_CLOCK_GATE (VLV_DISPLAY_BASE + 0x2060) +#define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030) +#define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034) +#define SCPD0 _MMIO(0x209c) /* 915+ only */ +#define IER _MMIO(0x20a0) +#define IIR _MMIO(0x20a4) +#define IMR _MMIO(0x20a8) +#define ISR _MMIO(0x20ac) +#define VLV_GUNIT_CLOCK_GATE _MMIO(VLV_DISPLAY_BASE + 0x2060) #define GINT_DIS (1<<22) #define GCFG_DIS (1<<8) -#define VLV_GUNIT_CLOCK_GATE2 (VLV_DISPLAY_BASE + 0x2064) -#define VLV_IIR_RW (VLV_DISPLAY_BASE + 0x2084) -#define VLV_IER (VLV_DISPLAY_BASE + 0x20a0) -#define VLV_IIR (VLV_DISPLAY_BASE + 0x20a4) -#define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8) -#define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac) -#define VLV_PCBR (VLV_DISPLAY_BASE + 0x2120) +#define VLV_GUNIT_CLOCK_GATE2 _MMIO(VLV_DISPLAY_BASE + 0x2064) +#define VLV_IIR_RW _MMIO(VLV_DISPLAY_BASE + 0x2084) +#define VLV_IER _MMIO(VLV_DISPLAY_BASE + 0x20a0) +#define VLV_IIR _MMIO(VLV_DISPLAY_BASE + 0x20a4) +#define VLV_IMR _MMIO(VLV_DISPLAY_BASE + 0x20a8) +#define VLV_ISR _MMIO(VLV_DISPLAY_BASE + 0x20ac) +#define VLV_PCBR _MMIO(VLV_DISPLAY_BASE + 0x2120) #define VLV_PCBR_ADDR_SHIFT 12 #define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */ -#define EIR 0x020b0 -#define EMR 0x020b4 
-#define ESR 0x020b8 +#define EIR _MMIO(0x20b0) +#define EMR _MMIO(0x20b4) +#define ESR _MMIO(0x20b8) #define GM45_ERROR_PAGE_TABLE (1<<5) #define GM45_ERROR_MEM_PRIV (1<<4) #define I915_ERROR_PAGE_TABLE (1<<4) #define GM45_ERROR_CP_PRIV (1<<3) #define I915_ERROR_MEMORY_REFRESH (1<<1) #define I915_ERROR_INSTRUCTION (1<<0) -#define INSTPM 0x020c0 +#define INSTPM _MMIO(0x20c0) #define INSTPM_SELF_EN (1<<12) /* 915GM only */ #define INSTPM_AGPBUSY_INT_EN (1<<11) /* gen3: when disabled, pending interrupts will not assert AGPBUSY# and will only @@ -1764,14 +1824,14 @@ enum skl_disp_power_wells { #define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */ #define INSTPM_TLB_INVALIDATE (1<<9) #define INSTPM_SYNC_FLUSH (1<<5) -#define ACTHD 0x020c8 -#define MEM_MODE 0x020cc +#define ACTHD _MMIO(0x20c8) +#define MEM_MODE _MMIO(0x20cc) #define MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1<<3) /* 830 only */ #define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1<<2) /* 830/845 only */ #define MEM_DISPLAY_TRICKLE_FEED_DISABLE (1<<2) /* 85x only */ -#define FW_BLC 0x020d8 -#define FW_BLC2 0x020dc -#define FW_BLC_SELF 0x020e0 /* 915+ only */ +#define FW_BLC _MMIO(0x20d8) +#define FW_BLC2 _MMIO(0x20dc) +#define FW_BLC_SELF _MMIO(0x20e0) /* 915+ only */ #define FW_BLC_SELF_EN_MASK (1<<31) #define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */ #define FW_BLC_SELF_EN (1<<15) /* 945 only */ @@ -1779,7 +1839,7 @@ enum skl_disp_power_wells { #define MM_FIFO_WATERMARK 0x0001F000 #define LM_BURST_LENGTH 0x00000700 #define LM_FIFO_WATERMARK 0x0000001F -#define MI_ARB_STATE 0x020e4 /* 915+ only */ +#define MI_ARB_STATE _MMIO(0x20e4) /* 915+ only */ /* Make render/texture TLB fetches lower priorty than associated data * fetches. This is not turned on by default @@ -1843,11 +1903,11 @@ enum skl_disp_power_wells { #define MI_ARB_DISPLAY_PRIORITY_A_B (0 << 0) /* display A > display B */ #define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */ -#define MI_STATE 0x020e4 /* gen2 only */ +#define MI_STATE _MMIO(0x20e4) /* gen2 only */ #define MI_AGPBUSY_INT_EN (1 << 1) /* 85x only */ #define MI_AGPBUSY_830_MODE (1 << 0) /* 85x only */ -#define CACHE_MODE_0 0x02120 /* 915+ only */ +#define CACHE_MODE_0 _MMIO(0x2120) /* 915+ only */ #define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8) #define CM0_IZ_OPT_DISABLE (1<<6) #define CM0_ZR_OPT_DISABLE (1<<5) @@ -1856,32 +1916,32 @@ enum skl_disp_power_wells { #define CM0_COLOR_EVICT_DISABLE (1<<3) #define CM0_DEPTH_WRITE_DISABLE (1<<1) #define CM0_RC_OP_FLUSH_DISABLE (1<<0) -#define GFX_FLSH_CNTL 0x02170 /* 915+ only */ -#define GFX_FLSH_CNTL_GEN6 0x101008 +#define GFX_FLSH_CNTL _MMIO(0x2170) /* 915+ only */ +#define GFX_FLSH_CNTL_GEN6 _MMIO(0x101008) #define GFX_FLSH_CNTL_EN (1<<0) -#define ECOSKPD 0x021d0 +#define ECOSKPD _MMIO(0x21d0) #define ECO_GATING_CX_ONLY (1<<3) #define ECO_FLIP_DONE (1<<0) -#define CACHE_MODE_0_GEN7 0x7000 /* IVB+ */ +#define CACHE_MODE_0_GEN7 _MMIO(0x7000) /* IVB+ */ #define RC_OP_FLUSH_ENABLE (1<<0) #define HIZ_RAW_STALL_OPT_DISABLE (1<<2) -#define CACHE_MODE_1 0x7004 /* IVB+ */ +#define CACHE_MODE_1 _MMIO(0x7004) /* IVB+ */ #define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6) #define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1<<6) #define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1<<1) -#define GEN6_BLITTER_ECOSKPD 0x221d0 +#define GEN6_BLITTER_ECOSKPD _MMIO(0x221d0) #define GEN6_BLITTER_LOCK_SHIFT 16 #define GEN6_BLITTER_FBC_NOTIFY (1<<3) -#define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050 +#define GEN6_RC_SLEEP_PSMI_CONTROL _MMIO(0x2050) #define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0) #define 
GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12) #define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10) /* Fuse readout registers for GT */ -#define CHV_FUSE_GT (VLV_DISPLAY_BASE + 0x2168) +#define CHV_FUSE_GT _MMIO(VLV_DISPLAY_BASE + 0x2168) #define CHV_FGT_DISABLE_SS0 (1 << 10) #define CHV_FGT_DISABLE_SS1 (1 << 11) #define CHV_FGT_EU_DIS_SS0_R0_SHIFT 16 @@ -1893,7 +1953,7 @@ enum skl_disp_power_wells { #define CHV_FGT_EU_DIS_SS1_R1_SHIFT 28 #define CHV_FGT_EU_DIS_SS1_R1_MASK (0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT) -#define GEN8_FUSE2 0x9120 +#define GEN8_FUSE2 _MMIO(0x9120) #define GEN8_F2_SS_DIS_SHIFT 21 #define GEN8_F2_SS_DIS_MASK (0x7 << GEN8_F2_SS_DIS_SHIFT) #define GEN8_F2_S_ENA_SHIFT 25 @@ -1902,22 +1962,22 @@ enum skl_disp_power_wells { #define GEN9_F2_SS_DIS_SHIFT 20 #define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT) -#define GEN8_EU_DISABLE0 0x9134 +#define GEN8_EU_DISABLE0 _MMIO(0x9134) #define GEN8_EU_DIS0_S0_MASK 0xffffff #define GEN8_EU_DIS0_S1_SHIFT 24 #define GEN8_EU_DIS0_S1_MASK (0xff << GEN8_EU_DIS0_S1_SHIFT) -#define GEN8_EU_DISABLE1 0x9138 +#define GEN8_EU_DISABLE1 _MMIO(0x9138) #define GEN8_EU_DIS1_S1_MASK 0xffff #define GEN8_EU_DIS1_S2_SHIFT 16 #define GEN8_EU_DIS1_S2_MASK (0xffff << GEN8_EU_DIS1_S2_SHIFT) -#define GEN8_EU_DISABLE2 0x913c +#define GEN8_EU_DISABLE2 _MMIO(0x913c) #define GEN8_EU_DIS2_S2_MASK 0xff -#define GEN9_EU_DISABLE(slice) (0x9134 + (slice)*0x4) +#define GEN9_EU_DISABLE(slice) _MMIO(0x9134 + (slice)*0x4) -#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050 +#define GEN6_BSD_SLEEP_PSMI_CONTROL _MMIO(0x12050) #define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0) #define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2) #define GEN6_BSD_SLEEP_INDICATOR (1 << 3) @@ -1995,9 +2055,9 @@ enum skl_disp_power_wells { #define I915_ASLE_INTERRUPT (1<<0) #define I915_BSD_USER_INTERRUPT (1<<25) -#define GEN6_BSD_RNCID 0x12198 +#define GEN6_BSD_RNCID _MMIO(0x12198) -#define GEN7_FF_THREAD_MODE 0x20a0 +#define GEN7_FF_THREAD_MODE _MMIO(0x20a0) #define GEN7_FF_SCHED_MASK 0x0077070 #define GEN8_FF_DS_REF_CNT_FFME (1 << 19) #define GEN7_FF_TS_SCHED_HS1 (0x5<<16) @@ -2018,9 +2078,9 @@ enum skl_disp_power_wells { * Framebuffer compression (915+ only) */ -#define FBC_CFB_BASE 0x03200 /* 4k page aligned */ -#define FBC_LL_BASE 0x03204 /* 4k page aligned */ -#define FBC_CONTROL 0x03208 +#define FBC_CFB_BASE _MMIO(0x3200) /* 4k page aligned */ +#define FBC_LL_BASE _MMIO(0x3204) /* 4k page aligned */ +#define FBC_CONTROL _MMIO(0x3208) #define FBC_CTL_EN (1<<31) #define FBC_CTL_PERIODIC (1<<30) #define FBC_CTL_INTERVAL_SHIFT (16) @@ -2028,14 +2088,14 @@ enum skl_disp_power_wells { #define FBC_CTL_C3_IDLE (1<<13) #define FBC_CTL_STRIDE_SHIFT (5) #define FBC_CTL_FENCENO_SHIFT (0) -#define FBC_COMMAND 0x0320c +#define FBC_COMMAND _MMIO(0x320c) #define FBC_CMD_COMPRESS (1<<0) -#define FBC_STATUS 0x03210 +#define FBC_STATUS _MMIO(0x3210) #define FBC_STAT_COMPRESSING (1<<31) #define FBC_STAT_COMPRESSED (1<<30) #define FBC_STAT_MODIFIED (1<<29) #define FBC_STAT_CURRENT_LINE_SHIFT (0) -#define FBC_CONTROL2 0x03214 +#define FBC_CONTROL2 _MMIO(0x3214) #define FBC_CTL_FENCE_DBL (0<<4) #define FBC_CTL_IDLE_IMM (0<<2) #define FBC_CTL_IDLE_FULL (1<<2) @@ -2043,17 +2103,17 @@ enum skl_disp_power_wells { #define FBC_CTL_IDLE_DEBUG (3<<2) #define FBC_CTL_CPU_FENCE (1<<1) #define FBC_CTL_PLANE(plane) ((plane)<<0) -#define FBC_FENCE_OFF 0x03218 /* BSpec typo has 321Bh */ -#define FBC_TAG(i) (0x03300 + (i) * 4) +#define FBC_FENCE_OFF _MMIO(0x3218) /* BSpec typo has 321Bh */ +#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4) -#define 
FBC_STATUS2 0x43214 +#define FBC_STATUS2 _MMIO(0x43214) #define FBC_COMPRESSION_MASK 0x7ff #define FBC_LL_SIZE (1536) /* Framebuffer compression for GM45+ */ -#define DPFC_CB_BASE 0x3200 -#define DPFC_CONTROL 0x3208 +#define DPFC_CB_BASE _MMIO(0x3200) +#define DPFC_CONTROL _MMIO(0x3208) #define DPFC_CTL_EN (1<<31) #define DPFC_CTL_PLANE(plane) ((plane)<<30) #define IVB_DPFC_CTL_PLANE(plane) ((plane)<<29) @@ -2064,37 +2124,37 @@ enum skl_disp_power_wells { #define DPFC_CTL_LIMIT_1X (0<<6) #define DPFC_CTL_LIMIT_2X (1<<6) #define DPFC_CTL_LIMIT_4X (2<<6) -#define DPFC_RECOMP_CTL 0x320c +#define DPFC_RECOMP_CTL _MMIO(0x320c) #define DPFC_RECOMP_STALL_EN (1<<27) #define DPFC_RECOMP_STALL_WM_SHIFT (16) #define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000) #define DPFC_RECOMP_TIMER_COUNT_SHIFT (0) #define DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f) -#define DPFC_STATUS 0x3210 +#define DPFC_STATUS _MMIO(0x3210) #define DPFC_INVAL_SEG_SHIFT (16) #define DPFC_INVAL_SEG_MASK (0x07ff0000) #define DPFC_COMP_SEG_SHIFT (0) #define DPFC_COMP_SEG_MASK (0x000003ff) -#define DPFC_STATUS2 0x3214 -#define DPFC_FENCE_YOFF 0x3218 -#define DPFC_CHICKEN 0x3224 +#define DPFC_STATUS2 _MMIO(0x3214) +#define DPFC_FENCE_YOFF _MMIO(0x3218) +#define DPFC_CHICKEN _MMIO(0x3224) #define DPFC_HT_MODIFY (1<<31) /* Framebuffer compression for Ironlake */ -#define ILK_DPFC_CB_BASE 0x43200 -#define ILK_DPFC_CONTROL 0x43208 +#define ILK_DPFC_CB_BASE _MMIO(0x43200) +#define ILK_DPFC_CONTROL _MMIO(0x43208) #define FBC_CTL_FALSE_COLOR (1<<10) /* The bit 28-8 is reserved */ #define DPFC_RESERVED (0x1FFFFF00) -#define ILK_DPFC_RECOMP_CTL 0x4320c -#define ILK_DPFC_STATUS 0x43210 -#define ILK_DPFC_FENCE_YOFF 0x43218 -#define ILK_DPFC_CHICKEN 0x43224 -#define ILK_FBC_RT_BASE 0x2128 +#define ILK_DPFC_RECOMP_CTL _MMIO(0x4320c) +#define ILK_DPFC_STATUS _MMIO(0x43210) +#define ILK_DPFC_FENCE_YOFF _MMIO(0x43218) +#define ILK_DPFC_CHICKEN _MMIO(0x43224) +#define ILK_FBC_RT_BASE _MMIO(0x2128) #define ILK_FBC_RT_VALID (1<<0) #define SNB_FBC_FRONT_BUFFER (1<<1) -#define ILK_DISPLAY_CHICKEN1 0x42000 +#define ILK_DISPLAY_CHICKEN1 _MMIO(0x42000) #define ILK_FBCQ_DIS (1<<22) #define ILK_PABSTRETCH_DIS (1<<21) @@ -2104,31 +2164,31 @@ enum skl_disp_power_wells { * * The following two registers are of type GTTMMADR */ -#define SNB_DPFC_CTL_SA 0x100100 +#define SNB_DPFC_CTL_SA _MMIO(0x100100) #define SNB_CPU_FENCE_ENABLE (1<<29) -#define DPFC_CPU_FENCE_OFFSET 0x100104 +#define DPFC_CPU_FENCE_OFFSET _MMIO(0x100104) /* Framebuffer compression for Ivybridge */ -#define IVB_FBC_RT_BASE 0x7020 +#define IVB_FBC_RT_BASE _MMIO(0x7020) -#define IPS_CTL 0x43408 +#define IPS_CTL _MMIO(0x43408) #define IPS_ENABLE (1 << 31) -#define MSG_FBC_REND_STATE 0x50380 +#define MSG_FBC_REND_STATE _MMIO(0x50380) #define FBC_REND_NUKE (1<<2) #define FBC_REND_CACHE_CLEAN (1<<1) /* * GPIO regs */ -#define GPIOA 0x5010 -#define GPIOB 0x5014 -#define GPIOC 0x5018 -#define GPIOD 0x501c -#define GPIOE 0x5020 -#define GPIOF 0x5024 -#define GPIOG 0x5028 -#define GPIOH 0x502c +#define GPIOA _MMIO(0x5010) +#define GPIOB _MMIO(0x5014) +#define GPIOC _MMIO(0x5018) +#define GPIOD _MMIO(0x501c) +#define GPIOE _MMIO(0x5020) +#define GPIOF _MMIO(0x5024) +#define GPIOG _MMIO(0x5028) +#define GPIOH _MMIO(0x502c) # define GPIO_CLOCK_DIR_MASK (1 << 0) # define GPIO_CLOCK_DIR_IN (0 << 1) # define GPIO_CLOCK_DIR_OUT (1 << 1) @@ -2144,7 +2204,7 @@ enum skl_disp_power_wells { # define GPIO_DATA_VAL_IN (1 << 12) # define GPIO_DATA_PULLUP_DISABLE (1 << 13) -#define GMBUS0 (dev_priv->gpio_mmio_base + 0x5100) /* 
clock/port select */ +#define GMBUS0 _MMIO(dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */ #define GMBUS_RATE_100KHZ (0<<8) #define GMBUS_RATE_50KHZ (1<<8) #define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */ @@ -2163,7 +2223,7 @@ enum skl_disp_power_wells { #define GMBUS_PIN_2_BXT 2 #define GMBUS_PIN_3_BXT 3 #define GMBUS_NUM_PINS 7 /* including 0 */ -#define GMBUS1 (dev_priv->gpio_mmio_base + 0x5104) /* command/status */ +#define GMBUS1 _MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */ #define GMBUS_SW_CLR_INT (1<<31) #define GMBUS_SW_RDY (1<<30) #define GMBUS_ENT (1<<29) /* enable timeout */ @@ -2177,7 +2237,7 @@ enum skl_disp_power_wells { #define GMBUS_SLAVE_ADDR_SHIFT 1 #define GMBUS_SLAVE_READ (1<<0) #define GMBUS_SLAVE_WRITE (0<<0) -#define GMBUS2 (dev_priv->gpio_mmio_base + 0x5108) /* status */ +#define GMBUS2 _MMIO(dev_priv->gpio_mmio_base + 0x5108) /* status */ #define GMBUS_INUSE (1<<15) #define GMBUS_HW_WAIT_PHASE (1<<14) #define GMBUS_STALL_TIMEOUT (1<<13) @@ -2185,14 +2245,14 @@ enum skl_disp_power_wells { #define GMBUS_HW_RDY (1<<11) #define GMBUS_SATOER (1<<10) #define GMBUS_ACTIVE (1<<9) -#define GMBUS3 (dev_priv->gpio_mmio_base + 0x510c) /* data buffer bytes 3-0 */ -#define GMBUS4 (dev_priv->gpio_mmio_base + 0x5110) /* interrupt mask (Pineview+) */ +#define GMBUS3 _MMIO(dev_priv->gpio_mmio_base + 0x510c) /* data buffer bytes 3-0 */ +#define GMBUS4 _MMIO(dev_priv->gpio_mmio_base + 0x5110) /* interrupt mask (Pineview+) */ #define GMBUS_SLAVE_TIMEOUT_EN (1<<4) #define GMBUS_NAK_EN (1<<3) #define GMBUS_IDLE_EN (1<<2) #define GMBUS_HW_WAIT_EN (1<<1) #define GMBUS_HW_RDY_EN (1<<0) -#define GMBUS5 (dev_priv->gpio_mmio_base + 0x5120) /* byte index */ +#define GMBUS5 _MMIO(dev_priv->gpio_mmio_base + 0x5120) /* byte index */ #define GMBUS_2BYTE_INDEX_EN (1<<31) /* @@ -2201,11 +2261,11 @@ enum skl_disp_power_wells { #define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014) #define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018) #define _CHV_DPLL_C (dev_priv->info.display_mmio_offset + 0x6030) -#define DPLL(pipe) _PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C) +#define DPLL(pipe) _MMIO_PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C) -#define VGA0 0x6000 -#define VGA1 0x6004 -#define VGA_PD 0x6010 +#define VGA0 _MMIO(0x6000) +#define VGA1 _MMIO(0x6004) +#define VGA_PD _MMIO(0x6010) #define VGA0_PD_P2_DIV_4 (1 << 7) #define VGA0_PD_P1_DIV_2 (1 << 5) #define VGA0_PD_P1_SHIFT 0 @@ -2241,9 +2301,9 @@ enum skl_disp_power_wells { #define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 /* Additional CHV pll/phy registers */ -#define DPIO_PHY_STATUS (VLV_DISPLAY_BASE + 0x6240) +#define DPIO_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x6240) #define DPLL_PORTD_READY_MASK (0xf) -#define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100) +#define DISPLAY_PHY_CONTROL _MMIO(VLV_DISPLAY_BASE + 0x60100) #define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) (1 << (2*(phy)+(ch)+27)) #define PHY_LDO_DELAY_0NS 0x0 #define PHY_LDO_DELAY_200NS 0x1 @@ -2254,7 +2314,7 @@ enum skl_disp_power_wells { #define PHY_CH_DEEP_PSR 0x7 #define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6*(phy)+3*(ch)+2)) #define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy)) -#define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104) +#define DISPLAY_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x60104) #define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? 
(1<<31) : (1<<30)) #define PHY_STATUS_CMN_LDO(phy, ch) (1 << (6-(6*(phy)+3*(ch)))) #define PHY_STATUS_SPLINE_LDO(phy, ch, spline) (1 << (8-(6*(phy)+3*(ch)+(spline)))) @@ -2300,7 +2360,7 @@ enum skl_disp_power_wells { #define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c) #define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020) #define _CHV_DPLL_C_MD (dev_priv->info.display_mmio_offset + 0x603c) -#define DPLL_MD(pipe) _PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD) +#define DPLL_MD(pipe) _MMIO_PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD) /* * UDI pixel divider, controlling how many pixels are stuffed into a packet. @@ -2339,12 +2399,12 @@ enum skl_disp_power_wells { #define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f #define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 -#define _FPA0 0x06040 -#define _FPA1 0x06044 -#define _FPB0 0x06048 -#define _FPB1 0x0604c -#define FP0(pipe) _PIPE(pipe, _FPA0, _FPB0) -#define FP1(pipe) _PIPE(pipe, _FPA1, _FPB1) +#define _FPA0 0x6040 +#define _FPA1 0x6044 +#define _FPB0 0x6048 +#define _FPB1 0x604c +#define FP0(pipe) _MMIO_PIPE(pipe, _FPA0, _FPB0) +#define FP1(pipe) _MMIO_PIPE(pipe, _FPA1, _FPB1) #define FP_N_DIV_MASK 0x003f0000 #define FP_N_PINEVIEW_DIV_MASK 0x00ff0000 #define FP_N_DIV_SHIFT 16 @@ -2353,7 +2413,7 @@ enum skl_disp_power_wells { #define FP_M2_DIV_MASK 0x0000003f #define FP_M2_PINEVIEW_DIV_MASK 0x000000ff #define FP_M2_DIV_SHIFT 0 -#define DPLL_TEST 0x606c +#define DPLL_TEST _MMIO(0x606c) #define DPLLB_TEST_SDVO_DIV_1 (0 << 22) #define DPLLB_TEST_SDVO_DIV_2 (1 << 22) #define DPLLB_TEST_SDVO_DIV_4 (2 << 22) @@ -2364,12 +2424,12 @@ enum skl_disp_power_wells { #define DPLLA_TEST_N_BYPASS (1 << 3) #define DPLLA_TEST_M_BYPASS (1 << 2) #define DPLLA_INPUT_BUFFER_ENABLE (1 << 0) -#define D_STATE 0x6104 +#define D_STATE _MMIO(0x6104) #define DSTATE_GFX_RESET_I830 (1<<6) #define DSTATE_PLL_D3_OFF (1<<3) #define DSTATE_GFX_CLOCK_GATING (1<<1) #define DSTATE_DOT_CLOCK_GATING (1<<0) -#define DSPCLK_GATE_D (dev_priv->info.display_mmio_offset + 0x6200) +#define DSPCLK_GATE_D _MMIO(dev_priv->info.display_mmio_offset + 0x6200) # define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */ # define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */ # define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */ @@ -2408,7 +2468,7 @@ enum skl_disp_power_wells { # define ZVUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 830 */ # define OVLUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 845,865 */ -#define RENCLK_GATE_D1 0x6204 +#define RENCLK_GATE_D1 _MMIO(0x6204) # define BLITTER_CLOCK_GATE_DISABLE (1 << 13) /* 945GM only */ # define MPEG_CLOCK_GATE_DISABLE (1 << 12) /* 945GM only */ # define PC_FE_CLOCK_GATE_DISABLE (1 << 11) @@ -2472,35 +2532,35 @@ enum skl_disp_power_wells { # define I965_FT_CLOCK_GATE_DISABLE (1 << 1) # define I965_DM_CLOCK_GATE_DISABLE (1 << 0) -#define RENCLK_GATE_D2 0x6208 +#define RENCLK_GATE_D2 _MMIO(0x6208) #define VF_UNIT_CLOCK_GATE_DISABLE (1 << 9) #define GS_UNIT_CLOCK_GATE_DISABLE (1 << 7) #define CL_UNIT_CLOCK_GATE_DISABLE (1 << 6) -#define VDECCLK_GATE_D 0x620C /* g4x only */ +#define VDECCLK_GATE_D _MMIO(0x620C) /* g4x only */ #define VCP_UNIT_CLOCK_GATE_DISABLE (1 << 4) -#define RAMCLK_GATE_D 0x6210 /* CRL only */ -#define DEUC 0x6214 /* CRL only */ +#define RAMCLK_GATE_D _MMIO(0x6210) /* CRL only */ +#define DEUC _MMIO(0x6214) /* CRL only */ -#define FW_BLC_SELF_VLV (VLV_DISPLAY_BASE + 0x6500) +#define FW_BLC_SELF_VLV _MMIO(VLV_DISPLAY_BASE + 0x6500) #define FW_CSPWRDWNEN (1<<15) -#define MI_ARB_VLV (VLV_DISPLAY_BASE + 0x6504) +#define 
MI_ARB_VLV _MMIO(VLV_DISPLAY_BASE + 0x6504) -#define CZCLK_CDCLK_FREQ_RATIO (VLV_DISPLAY_BASE + 0x6508) +#define CZCLK_CDCLK_FREQ_RATIO _MMIO(VLV_DISPLAY_BASE + 0x6508) #define CDCLK_FREQ_SHIFT 4 #define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT) #define CZCLK_FREQ_MASK 0xf -#define GCI_CONTROL (VLV_DISPLAY_BASE + 0x650C) +#define GCI_CONTROL _MMIO(VLV_DISPLAY_BASE + 0x650C) #define PFI_CREDIT_63 (9 << 28) /* chv only */ #define PFI_CREDIT_31 (8 << 28) /* chv only */ #define PFI_CREDIT(x) (((x) - 8) << 28) /* 8-15 */ #define PFI_CREDIT_RESEND (1 << 27) #define VGA_FAST_MODE_DISABLE (1 << 14) -#define GMBUSFREQ_VLV (VLV_DISPLAY_BASE + 0x6510) +#define GMBUSFREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6510) /* * Palette regs @@ -2508,8 +2568,8 @@ enum skl_disp_power_wells { #define PALETTE_A_OFFSET 0xa000 #define PALETTE_B_OFFSET 0xa800 #define CHV_PALETTE_C_OFFSET 0xc000 -#define PALETTE(pipe, i) (dev_priv->info.palette_offsets[pipe] + \ - dev_priv->info.display_mmio_offset + (i) * 4) +#define PALETTE(pipe, i) _MMIO(dev_priv->info.palette_offsets[pipe] + \ + dev_priv->info.display_mmio_offset + (i) * 4) /* MCH MMIO space */ @@ -2527,37 +2587,37 @@ enum skl_disp_power_wells { #define MCHBAR_MIRROR_BASE_SNB 0x140000 -#define CTG_STOLEN_RESERVED (MCHBAR_MIRROR_BASE + 0x34) -#define ELK_STOLEN_RESERVED (MCHBAR_MIRROR_BASE + 0x48) +#define CTG_STOLEN_RESERVED _MMIO(MCHBAR_MIRROR_BASE + 0x34) +#define ELK_STOLEN_RESERVED _MMIO(MCHBAR_MIRROR_BASE + 0x48) #define G4X_STOLEN_RESERVED_ADDR1_MASK (0xFFFF << 16) #define G4X_STOLEN_RESERVED_ADDR2_MASK (0xFFF << 4) /* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */ -#define DCLK (MCHBAR_MIRROR_BASE_SNB + 0x5e04) +#define DCLK _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5e04) /* 915-945 and GM965 MCH register controlling DRAM channel access */ -#define DCC 0x10200 +#define DCC _MMIO(MCHBAR_MIRROR_BASE + 0x200) #define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) #define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0) #define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0) #define DCC_ADDRESSING_MODE_MASK (3 << 0) #define DCC_CHANNEL_XOR_DISABLE (1 << 10) #define DCC_CHANNEL_XOR_BIT_17 (1 << 9) -#define DCC2 0x10204 +#define DCC2 _MMIO(MCHBAR_MIRROR_BASE + 0x204) #define DCC2_MODIFIED_ENHANCED_DISABLE (1 << 20) /* Pineview MCH register contains DDR3 setting */ -#define CSHRDDR3CTL 0x101a8 +#define CSHRDDR3CTL _MMIO(MCHBAR_MIRROR_BASE + 0x1a8) #define CSHRDDR3CTL_DDR3 (1 << 2) /* 965 MCH register controlling DRAM channel configuration */ -#define C0DRB3 0x10206 -#define C1DRB3 0x10606 +#define C0DRB3 _MMIO(MCHBAR_MIRROR_BASE + 0x206) +#define C1DRB3 _MMIO(MCHBAR_MIRROR_BASE + 0x606) /* snb MCH registers for reading the DRAM channel configuration */ -#define MAD_DIMM_C0 (MCHBAR_MIRROR_BASE_SNB + 0x5004) -#define MAD_DIMM_C1 (MCHBAR_MIRROR_BASE_SNB + 0x5008) -#define MAD_DIMM_C2 (MCHBAR_MIRROR_BASE_SNB + 0x500C) +#define MAD_DIMM_C0 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5004) +#define MAD_DIMM_C1 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5008) +#define MAD_DIMM_C2 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x500C) #define MAD_DIMM_ECC_MASK (0x3 << 24) #define MAD_DIMM_ECC_OFF (0x0 << 24) #define MAD_DIMM_ECC_IO_ON_LOGIC_OFF (0x1 << 24) @@ -2577,14 +2637,14 @@ enum skl_disp_power_wells { #define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT) /* snb MCH registers for priority tuning */ -#define MCH_SSKPD (MCHBAR_MIRROR_BASE_SNB + 0x5d10) +#define MCH_SSKPD _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5d10) #define MCH_SSKPD_WM0_MASK 0x3f #define MCH_SSKPD_WM0_VAL 0xc -#define 
MCH_SECP_NRG_STTS (MCHBAR_MIRROR_BASE_SNB + 0x592c) +#define MCH_SECP_NRG_STTS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x592c) /* Clocking configuration register */ -#define CLKCFG 0x10c00 +#define CLKCFG _MMIO(MCHBAR_MIRROR_BASE + 0xc00) #define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */ #define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */ #define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */ @@ -2600,26 +2660,26 @@ enum skl_disp_power_wells { #define CLKCFG_MEM_800 (3 << 4) #define CLKCFG_MEM_MASK (7 << 4) -#define HPLLVCO (MCHBAR_MIRROR_BASE + 0xc38) -#define HPLLVCO_MOBILE (MCHBAR_MIRROR_BASE + 0xc0f) +#define HPLLVCO _MMIO(MCHBAR_MIRROR_BASE + 0xc38) +#define HPLLVCO_MOBILE _MMIO(MCHBAR_MIRROR_BASE + 0xc0f) -#define TSC1 0x11001 +#define TSC1 _MMIO(0x11001) #define TSE (1<<0) -#define TR1 0x11006 -#define TSFS 0x11020 +#define TR1 _MMIO(0x11006) +#define TSFS _MMIO(0x11020) #define TSFS_SLOPE_MASK 0x0000ff00 #define TSFS_SLOPE_SHIFT 8 #define TSFS_INTR_MASK 0x000000ff -#define CRSTANDVID 0x11100 -#define PXVFREQ(i) (0x11110 + (i) * 4) /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */ +#define CRSTANDVID _MMIO(0x11100) +#define PXVFREQ(fstart) _MMIO(0x11110 + (fstart) * 4) /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */ #define PXVFREQ_PX_MASK 0x7f000000 #define PXVFREQ_PX_SHIFT 24 -#define VIDFREQ_BASE 0x11110 -#define VIDFREQ1 0x11110 /* VIDFREQ1-4 (0x1111c) (Cantiga) */ -#define VIDFREQ2 0x11114 -#define VIDFREQ3 0x11118 -#define VIDFREQ4 0x1111c +#define VIDFREQ_BASE _MMIO(0x11110) +#define VIDFREQ1 _MMIO(0x11110) /* VIDFREQ1-4 (0x1111c) (Cantiga) */ +#define VIDFREQ2 _MMIO(0x11114) +#define VIDFREQ3 _MMIO(0x11118) +#define VIDFREQ4 _MMIO(0x1111c) #define VIDFREQ_P0_MASK 0x1f000000 #define VIDFREQ_P0_SHIFT 24 #define VIDFREQ_P0_CSCLK_MASK 0x00f00000 @@ -2631,8 +2691,8 @@ enum skl_disp_power_wells { #define VIDFREQ_P1_CSCLK_MASK 0x000000f0 #define VIDFREQ_P1_CSCLK_SHIFT 4 #define VIDFREQ_P1_CRCLK_MASK 0x0000000f -#define INTTOEXT_BASE_ILK 0x11300 -#define INTTOEXT_BASE 0x11120 /* INTTOEXT1-8 (0x1113c) */ +#define INTTOEXT_BASE_ILK _MMIO(0x11300) +#define INTTOEXT_BASE _MMIO(0x11120) /* INTTOEXT1-8 (0x1113c) */ #define INTTOEXT_MAP3_SHIFT 24 #define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT) #define INTTOEXT_MAP2_SHIFT 16 @@ -2641,7 +2701,7 @@ enum skl_disp_power_wells { #define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT) #define INTTOEXT_MAP0_SHIFT 0 #define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT) -#define MEMSWCTL 0x11170 /* Ironlake only */ +#define MEMSWCTL _MMIO(0x11170) /* Ironlake only */ #define MEMCTL_CMD_MASK 0xe000 #define MEMCTL_CMD_SHIFT 13 #define MEMCTL_CMD_RCLK_OFF 0 @@ -2656,8 +2716,8 @@ enum skl_disp_power_wells { #define MEMCTL_FREQ_SHIFT 8 #define MEMCTL_SFCAVM (1<<7) #define MEMCTL_TGT_VID_MASK 0x007f -#define MEMIHYST 0x1117c -#define MEMINTREN 0x11180 /* 16 bits */ +#define MEMIHYST _MMIO(0x1117c) +#define MEMINTREN _MMIO(0x11180) /* 16 bits */ #define MEMINT_RSEXIT_EN (1<<8) #define MEMINT_CX_SUPR_EN (1<<7) #define MEMINT_CONT_BUSY_EN (1<<6) @@ -2667,7 +2727,7 @@ enum skl_disp_power_wells { #define MEMINT_UP_EVAL_EN (1<<2) #define MEMINT_DOWN_EVAL_EN (1<<1) #define MEMINT_SW_CMD_EN (1<<0) -#define MEMINTRSTR 0x11182 /* 16 bits */ +#define MEMINTRSTR _MMIO(0x11182) /* 16 bits */ #define MEM_RSEXIT_MASK 0xc000 #define MEM_RSEXIT_SHIFT 14 #define MEM_CONT_BUSY_MASK 0x3000 @@ -2687,7 +2747,7 @@ enum skl_disp_power_wells { #define MEM_INT_STEER_CMR 1 #define MEM_INT_STEER_SMI 2 #define MEM_INT_STEER_SCI 3 -#define MEMINTRSTS 0x11184 +#define MEMINTRSTS _MMIO(0x11184) #define 
MEMINT_RSEXIT (1<<7) #define MEMINT_CONT_BUSY (1<<6) #define MEMINT_AVG_BUSY (1<<5) @@ -2696,7 +2756,7 @@ enum skl_disp_power_wells { #define MEMINT_UP_EVAL (1<<2) #define MEMINT_DOWN_EVAL (1<<1) #define MEMINT_SW_CMD (1<<0) -#define MEMMODECTL 0x11190 +#define MEMMODECTL _MMIO(0x11190) #define MEMMODE_BOOST_EN (1<<31) #define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */ #define MEMMODE_BOOST_FREQ_SHIFT 24 @@ -2713,8 +2773,8 @@ enum skl_disp_power_wells { #define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */ #define MEMMODE_FMAX_SHIFT 4 #define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */ -#define RCBMAXAVG 0x1119c -#define MEMSWCTL2 0x1119e /* Cantiga only */ +#define RCBMAXAVG _MMIO(0x1119c) +#define MEMSWCTL2 _MMIO(0x1119e) /* Cantiga only */ #define SWMEMCMD_RENDER_OFF (0 << 13) #define SWMEMCMD_RENDER_ON (1 << 13) #define SWMEMCMD_SWFREQ (2 << 13) @@ -2726,11 +2786,11 @@ enum skl_disp_power_wells { #define SWFREQ_MASK 0x0380 /* P0-7 */ #define SWFREQ_SHIFT 7 #define TARVID_MASK 0x001f -#define MEMSTAT_CTG 0x111a0 -#define RCBMINAVG 0x111a0 -#define RCUPEI 0x111b0 -#define RCDNEI 0x111b4 -#define RSTDBYCTL 0x111b8 +#define MEMSTAT_CTG _MMIO(0x111a0) +#define RCBMINAVG _MMIO(0x111a0) +#define RCUPEI _MMIO(0x111b0) +#define RCDNEI _MMIO(0x111b4) +#define RSTDBYCTL _MMIO(0x111b8) #define RS1EN (1<<31) #define RS2EN (1<<30) #define RS3EN (1<<29) @@ -2774,10 +2834,10 @@ enum skl_disp_power_wells { #define RS_CSTATE_C367_RS2 (3<<4) #define REDSAVES (1<<3) /* no context save if was idle during rs0 */ #define REDRESTORES (1<<2) /* no restore if was idle during rs0 */ -#define VIDCTL 0x111c0 -#define VIDSTS 0x111c8 -#define VIDSTART 0x111cc /* 8 bits */ -#define MEMSTAT_ILK 0x111f8 +#define VIDCTL _MMIO(0x111c0) +#define VIDSTS _MMIO(0x111c8) +#define VIDSTART _MMIO(0x111cc) /* 8 bits */ +#define MEMSTAT_ILK _MMIO(0x111f8) #define MEMSTAT_VID_MASK 0x7f00 #define MEMSTAT_VID_SHIFT 8 #define MEMSTAT_PSTATE_MASK 0x00f8 @@ -2788,55 +2848,55 @@ enum skl_disp_power_wells { #define MEMSTAT_SRC_CTL_TRB 1 #define MEMSTAT_SRC_CTL_THM 2 #define MEMSTAT_SRC_CTL_STDBY 3 -#define RCPREVBSYTUPAVG 0x113b8 -#define RCPREVBSYTDNAVG 0x113bc -#define PMMISC 0x11214 +#define RCPREVBSYTUPAVG _MMIO(0x113b8) +#define RCPREVBSYTDNAVG _MMIO(0x113bc) +#define PMMISC _MMIO(0x11214) #define MCPPCE_EN (1<<0) /* enable PM_MSG from PCH->MPC */ -#define SDEW 0x1124c -#define CSIEW0 0x11250 -#define CSIEW1 0x11254 -#define CSIEW2 0x11258 -#define PEW(i) (0x1125c + (i) * 4) /* 5 registers */ -#define DEW(i) (0x11270 + (i) * 4) /* 3 registers */ -#define MCHAFE 0x112c0 -#define CSIEC 0x112e0 -#define DMIEC 0x112e4 -#define DDREC 0x112e8 -#define PEG0EC 0x112ec -#define PEG1EC 0x112f0 -#define GFXEC 0x112f4 -#define RPPREVBSYTUPAVG 0x113b8 -#define RPPREVBSYTDNAVG 0x113bc -#define ECR 0x11600 +#define SDEW _MMIO(0x1124c) +#define CSIEW0 _MMIO(0x11250) +#define CSIEW1 _MMIO(0x11254) +#define CSIEW2 _MMIO(0x11258) +#define PEW(i) _MMIO(0x1125c + (i) * 4) /* 5 registers */ +#define DEW(i) _MMIO(0x11270 + (i) * 4) /* 3 registers */ +#define MCHAFE _MMIO(0x112c0) +#define CSIEC _MMIO(0x112e0) +#define DMIEC _MMIO(0x112e4) +#define DDREC _MMIO(0x112e8) +#define PEG0EC _MMIO(0x112ec) +#define PEG1EC _MMIO(0x112f0) +#define GFXEC _MMIO(0x112f4) +#define RPPREVBSYTUPAVG _MMIO(0x113b8) +#define RPPREVBSYTDNAVG _MMIO(0x113bc) +#define ECR _MMIO(0x11600) #define ECR_GPFE (1<<31) #define ECR_IMONE (1<<30) #define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */ -#define OGW0 0x11608 -#define OGW1 0x1160c -#define 
EG0 0x11610 -#define EG1 0x11614 -#define EG2 0x11618 -#define EG3 0x1161c -#define EG4 0x11620 -#define EG5 0x11624 -#define EG6 0x11628 -#define EG7 0x1162c -#define PXW(i) (0x11664 + (i) * 4) /* 4 registers */ -#define PXWL(i) (0x11680 + (i) * 4) /* 8 registers */ -#define LCFUSE02 0x116c0 +#define OGW0 _MMIO(0x11608) +#define OGW1 _MMIO(0x1160c) +#define EG0 _MMIO(0x11610) +#define EG1 _MMIO(0x11614) +#define EG2 _MMIO(0x11618) +#define EG3 _MMIO(0x1161c) +#define EG4 _MMIO(0x11620) +#define EG5 _MMIO(0x11624) +#define EG6 _MMIO(0x11628) +#define EG7 _MMIO(0x1162c) +#define PXW(i) _MMIO(0x11664 + (i) * 4) /* 4 registers */ +#define PXWL(i) _MMIO(0x11680 + (i) * 4) /* 8 registers */ +#define LCFUSE02 _MMIO(0x116c0) #define LCFUSE_HIV_MASK 0x000000ff -#define CSIPLL0 0x12c10 -#define DDRMPLL1 0X12c20 -#define PEG_BAND_GAP_DATA 0x14d68 +#define CSIPLL0 _MMIO(0x12c10) +#define DDRMPLL1 _MMIO(0X12c20) +#define PEG_BAND_GAP_DATA _MMIO(0x14d68) -#define GEN6_GT_THREAD_STATUS_REG 0x13805c +#define GEN6_GT_THREAD_STATUS_REG _MMIO(0x13805c) #define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7 -#define GEN6_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x5948) -#define BXT_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x7070) -#define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994) -#define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998) -#define BXT_RP_STATE_CAP 0x138170 +#define GEN6_GT_PERF_STATUS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5948) +#define BXT_GT_PERF_STATUS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x7070) +#define GEN6_RP_STATE_LIMITS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5994) +#define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998) +#define BXT_RP_STATE_CAP _MMIO(0x138170) #define INTERVAL_1_28_US(us) (((us) * 100) >> 7) #define INTERVAL_1_33_US(us) (((us) * 3) >> 2) @@ -2850,7 +2910,7 @@ enum skl_disp_power_wells { /* * Logical Context regs */ -#define CCID 0x2180 +#define CCID _MMIO(0x2180) #define CCID_EN (1<<0) /* * Notes on SNB/IVB/VLV context size: @@ -2865,7 +2925,7 @@ enum skl_disp_power_wells { * - GT1 size just indicates how much of render context * doesn't need saving on GT1 */ -#define CXT_SIZE 0x21a0 +#define CXT_SIZE _MMIO(0x21a0) #define GEN6_CXT_POWER_SIZE(cxt_reg) (((cxt_reg) >> 24) & 0x3f) #define GEN6_CXT_RING_SIZE(cxt_reg) (((cxt_reg) >> 18) & 0x3f) #define GEN6_CXT_RENDER_SIZE(cxt_reg) (((cxt_reg) >> 12) & 0x3f) @@ -2874,7 +2934,7 @@ enum skl_disp_power_wells { #define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \ GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \ GEN6_CXT_PIPELINE_SIZE(cxt_reg)) -#define GEN7_CXT_SIZE 0x21a8 +#define GEN7_CXT_SIZE _MMIO(0x21a8) #define GEN7_CXT_POWER_SIZE(ctx_reg) (((ctx_reg) >> 25) & 0x7f) #define GEN7_CXT_RING_SIZE(ctx_reg) (((ctx_reg) >> 22) & 0x7) #define GEN7_CXT_RENDER_SIZE(ctx_reg) (((ctx_reg) >> 16) & 0x3f) @@ -2894,23 +2954,23 @@ enum skl_disp_power_wells { /* Same as Haswell, but 72064 bytes now.
*/ #define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE) -#define CHV_CLK_CTL1 0x101100 -#define VLV_CLK_CTL2 0x101104 +#define CHV_CLK_CTL1 _MMIO(0x101100) +#define VLV_CLK_CTL2 _MMIO(0x101104) #define CLK_CTL2_CZCOUNT_30NS_SHIFT 28 /* * Overlay regs */ -#define OVADD 0x30000 -#define DOVSTA 0x30008 +#define OVADD _MMIO(0x30000) +#define DOVSTA _MMIO(0x30008) #define OC_BUF (0x3<<20) -#define OGAMC5 0x30010 -#define OGAMC4 0x30014 -#define OGAMC3 0x30018 -#define OGAMC2 0x3001c -#define OGAMC1 0x30020 -#define OGAMC0 0x30024 +#define OGAMC5 _MMIO(0x30010) +#define OGAMC4 _MMIO(0x30014) +#define OGAMC3 _MMIO(0x30018) +#define OGAMC2 _MMIO(0x3001c) +#define OGAMC1 _MMIO(0x30020) +#define OGAMC0 _MMIO(0x30024) /* * Display engine regs @@ -2970,28 +3030,18 @@ enum skl_disp_power_wells { #define _PIPE_CRC_RES_4_B_IVB 0x61070 #define _PIPE_CRC_RES_5_B_IVB 0x61074 -#define PIPE_CRC_CTL(pipe) _TRANSCODER2(pipe, _PIPE_CRC_CTL_A) -#define PIPE_CRC_RES_1_IVB(pipe) \ - _TRANSCODER2(pipe, _PIPE_CRC_RES_1_A_IVB) -#define PIPE_CRC_RES_2_IVB(pipe) \ - _TRANSCODER2(pipe, _PIPE_CRC_RES_2_A_IVB) -#define PIPE_CRC_RES_3_IVB(pipe) \ - _TRANSCODER2(pipe, _PIPE_CRC_RES_3_A_IVB) -#define PIPE_CRC_RES_4_IVB(pipe) \ - _TRANSCODER2(pipe, _PIPE_CRC_RES_4_A_IVB) -#define PIPE_CRC_RES_5_IVB(pipe) \ - _TRANSCODER2(pipe, _PIPE_CRC_RES_5_A_IVB) - -#define PIPE_CRC_RES_RED(pipe) \ - _TRANSCODER2(pipe, _PIPE_CRC_RES_RED_A) -#define PIPE_CRC_RES_GREEN(pipe) \ - _TRANSCODER2(pipe, _PIPE_CRC_RES_GREEN_A) -#define PIPE_CRC_RES_BLUE(pipe) \ - _TRANSCODER2(pipe, _PIPE_CRC_RES_BLUE_A) -#define PIPE_CRC_RES_RES1_I915(pipe) \ - _TRANSCODER2(pipe, _PIPE_CRC_RES_RES1_A_I915) -#define PIPE_CRC_RES_RES2_G4X(pipe) \ - _TRANSCODER2(pipe, _PIPE_CRC_RES_RES2_A_G4X) +#define PIPE_CRC_CTL(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_CTL_A) +#define PIPE_CRC_RES_1_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_1_A_IVB) +#define PIPE_CRC_RES_2_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_2_A_IVB) +#define PIPE_CRC_RES_3_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_3_A_IVB) +#define PIPE_CRC_RES_4_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_4_A_IVB) +#define PIPE_CRC_RES_5_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_5_A_IVB) + +#define PIPE_CRC_RES_RED(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RED_A) +#define PIPE_CRC_RES_GREEN(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_GREEN_A) +#define PIPE_CRC_RES_BLUE(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_BLUE_A) +#define PIPE_CRC_RES_RES1_I915(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RES1_A_I915) +#define PIPE_CRC_RES_RES2_G4X(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RES2_A_G4X) /* Pipe A timing regs */ #define _HTOTAL_A 0x60000 @@ -3023,20 +3073,20 @@ enum skl_disp_power_wells { #define CHV_TRANSCODER_C_OFFSET 0x63000 #define TRANSCODER_EDP_OFFSET 0x6f000 -#define _TRANSCODER2(pipe, reg) (dev_priv->info.trans_offsets[(pipe)] - \ +#define _MMIO_TRANS2(pipe, reg) _MMIO(dev_priv->info.trans_offsets[(pipe)] - \ dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \ dev_priv->info.display_mmio_offset) -#define HTOTAL(trans) _TRANSCODER2(trans, _HTOTAL_A) -#define HBLANK(trans) _TRANSCODER2(trans, _HBLANK_A) -#define HSYNC(trans) _TRANSCODER2(trans, _HSYNC_A) -#define VTOTAL(trans) _TRANSCODER2(trans, _VTOTAL_A) -#define VBLANK(trans) _TRANSCODER2(trans, _VBLANK_A) -#define VSYNC(trans) _TRANSCODER2(trans, _VSYNC_A) -#define BCLRPAT(trans) _TRANSCODER2(trans, _BCLRPAT_A) -#define VSYNCSHIFT(trans) _TRANSCODER2(trans, _VSYNCSHIFT_A) -#define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC) -#define PIPE_MULT(trans) _TRANSCODER2(trans, _PIPE_MULT_A) 
+#define HTOTAL(trans) _MMIO_TRANS2(trans, _HTOTAL_A) +#define HBLANK(trans) _MMIO_TRANS2(trans, _HBLANK_A) +#define HSYNC(trans) _MMIO_TRANS2(trans, _HSYNC_A) +#define VTOTAL(trans) _MMIO_TRANS2(trans, _VTOTAL_A) +#define VBLANK(trans) _MMIO_TRANS2(trans, _VBLANK_A) +#define VSYNC(trans) _MMIO_TRANS2(trans, _VSYNC_A) +#define BCLRPAT(trans) _MMIO_TRANS2(trans, _BCLRPAT_A) +#define VSYNCSHIFT(trans) _MMIO_TRANS2(trans, _VSYNCSHIFT_A) +#define PIPESRC(trans) _MMIO_TRANS2(trans, _PIPEASRC) +#define PIPE_MULT(trans) _MMIO_TRANS2(trans, _PIPE_MULT_A) /* VLV eDP PSR registers */ #define _PSRCTLA (VLV_DISPLAY_BASE + 0x60090) @@ -3052,14 +3102,14 @@ enum skl_disp_power_wells { #define VLV_EDP_PSR_DBL_FRAME (1<<10) #define VLV_EDP_PSR_FRAME_COUNT_MASK (0xff<<16) #define VLV_EDP_PSR_IDLE_FRAME_SHIFT 16 -#define VLV_PSRCTL(pipe) _PIPE(pipe, _PSRCTLA, _PSRCTLB) +#define VLV_PSRCTL(pipe) _MMIO_PIPE(pipe, _PSRCTLA, _PSRCTLB) #define _VSCSDPA (VLV_DISPLAY_BASE + 0x600a0) #define _VSCSDPB (VLV_DISPLAY_BASE + 0x610a0) #define VLV_EDP_PSR_SDP_FREQ_MASK (3<<30) #define VLV_EDP_PSR_SDP_FREQ_ONCE (1<<31) #define VLV_EDP_PSR_SDP_FREQ_EVFRAME (1<<30) -#define VLV_VSCSDP(pipe) _PIPE(pipe, _VSCSDPA, _VSCSDPB) +#define VLV_VSCSDP(pipe) _MMIO_PIPE(pipe, _VSCSDPA, _VSCSDPB) #define _PSRSTATA (VLV_DISPLAY_BASE + 0x60094) #define _PSRSTATB (VLV_DISPLAY_BASE + 0x61094) @@ -3072,11 +3122,12 @@ enum skl_disp_power_wells { #define VLV_EDP_PSR_ACTIVE_SF_UPDATE (4<<0) #define VLV_EDP_PSR_EXIT (5<<0) #define VLV_EDP_PSR_IN_TRANS (1<<7) -#define VLV_PSRSTAT(pipe) _PIPE(pipe, _PSRSTATA, _PSRSTATB) +#define VLV_PSRSTAT(pipe) _MMIO_PIPE(pipe, _PSRSTATA, _PSRSTATB) /* HSW+ eDP PSR registers */ -#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800) -#define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0) +#define HSW_EDP_PSR_BASE 0x64800 +#define BDW_EDP_PSR_BASE 0x6f800 +#define EDP_PSR_CTL _MMIO(dev_priv->psr_mmio_base + 0) #define EDP_PSR_ENABLE (1<<31) #define BDW_PSR_SINGLE_FRAME (1<<30) #define EDP_PSR_LINK_STANDBY (1<<27) @@ -3099,14 +3150,10 @@ enum skl_disp_power_wells { #define EDP_PSR_TP1_TIME_0us (3<<4) #define EDP_PSR_IDLE_FRAME_SHIFT 0 -#define EDP_PSR_AUX_CTL(dev) (EDP_PSR_BASE(dev) + 0x10) -#define EDP_PSR_AUX_DATA1(dev) (EDP_PSR_BASE(dev) + 0x14) -#define EDP_PSR_AUX_DATA2(dev) (EDP_PSR_BASE(dev) + 0x18) -#define EDP_PSR_AUX_DATA3(dev) (EDP_PSR_BASE(dev) + 0x1c) -#define EDP_PSR_AUX_DATA4(dev) (EDP_PSR_BASE(dev) + 0x20) -#define EDP_PSR_AUX_DATA5(dev) (EDP_PSR_BASE(dev) + 0x24) +#define EDP_PSR_AUX_CTL _MMIO(dev_priv->psr_mmio_base + 0x10) +#define EDP_PSR_AUX_DATA(i) _MMIO(dev_priv->psr_mmio_base + 0x14 + (i) * 4) /* 5 registers */ -#define EDP_PSR_STATUS_CTL(dev) (EDP_PSR_BASE(dev) + 0x40) +#define EDP_PSR_STATUS_CTL _MMIO(dev_priv->psr_mmio_base + 0x40) #define EDP_PSR_STATUS_STATE_MASK (7<<29) #define EDP_PSR_STATUS_STATE_IDLE (0<<29) #define EDP_PSR_STATUS_STATE_SRDONACK (1<<29) @@ -3130,15 +3177,15 @@ enum skl_disp_power_wells { #define EDP_PSR_STATUS_SENDING_TP1 (1<<4) #define EDP_PSR_STATUS_IDLE_MASK 0xf -#define EDP_PSR_PERF_CNT(dev) (EDP_PSR_BASE(dev) + 0x44) +#define EDP_PSR_PERF_CNT _MMIO(dev_priv->psr_mmio_base + 0x44) #define EDP_PSR_PERF_CNT_MASK 0xffffff -#define EDP_PSR_DEBUG_CTL(dev) (EDP_PSR_BASE(dev) + 0x60) +#define EDP_PSR_DEBUG_CTL _MMIO(dev_priv->psr_mmio_base + 0x60) #define EDP_PSR_DEBUG_MASK_LPSP (1<<27) #define EDP_PSR_DEBUG_MASK_MEMUP (1<<26) #define EDP_PSR_DEBUG_MASK_HPD (1<<25) -#define EDP_PSR2_CTL 0x6f900 +#define EDP_PSR2_CTL _MMIO(0x6f900) #define EDP_PSR2_ENABLE (1<<31) 
#define EDP_SU_TRACK_ENABLE (1<<30) #define EDP_MAX_SU_DISABLE_TIME(t) ((t)<<20) @@ -3153,9 +3200,9 @@ enum skl_disp_power_wells { #define EDP_PSR2_IDLE_MASK 0xf /* VGA port control */ -#define ADPA 0x61100 -#define PCH_ADPA 0xe1100 -#define VLV_ADPA (VLV_DISPLAY_BASE + ADPA) +#define ADPA _MMIO(0x61100) +#define PCH_ADPA _MMIO(0xe1100) +#define VLV_ADPA _MMIO(VLV_DISPLAY_BASE + 0x61100) #define ADPA_DAC_ENABLE (1<<31) #define ADPA_DAC_DISABLE 0 @@ -3201,7 +3248,7 @@ enum skl_disp_power_wells { /* Hotplug control (945+ only) */ -#define PORT_HOTPLUG_EN (dev_priv->info.display_mmio_offset + 0x61110) +#define PORT_HOTPLUG_EN _MMIO(dev_priv->info.display_mmio_offset + 0x61110) #define PORTB_HOTPLUG_INT_EN (1 << 29) #define PORTC_HOTPLUG_INT_EN (1 << 28) #define PORTD_HOTPLUG_INT_EN (1 << 27) @@ -3231,7 +3278,7 @@ enum skl_disp_power_wells { #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) -#define PORT_HOTPLUG_STAT (dev_priv->info.display_mmio_offset + 0x61114) +#define PORT_HOTPLUG_STAT _MMIO(dev_priv->info.display_mmio_offset + 0x61114) /* * HDMI/DP bits are gen4+ * @@ -3296,21 +3343,23 @@ enum skl_disp_power_wells { /* SDVO and HDMI port control. * The same register may be used for SDVO or HDMI */ -#define GEN3_SDVOB 0x61140 -#define GEN3_SDVOC 0x61160 +#define _GEN3_SDVOB 0x61140 +#define _GEN3_SDVOC 0x61160 +#define GEN3_SDVOB _MMIO(_GEN3_SDVOB) +#define GEN3_SDVOC _MMIO(_GEN3_SDVOC) #define GEN4_HDMIB GEN3_SDVOB #define GEN4_HDMIC GEN3_SDVOC -#define VLV_HDMIB (VLV_DISPLAY_BASE + GEN4_HDMIB) -#define VLV_HDMIC (VLV_DISPLAY_BASE + GEN4_HDMIC) -#define CHV_HDMID (VLV_DISPLAY_BASE + 0x6116C) -#define PCH_SDVOB 0xe1140 +#define VLV_HDMIB _MMIO(VLV_DISPLAY_BASE + 0x61140) +#define VLV_HDMIC _MMIO(VLV_DISPLAY_BASE + 0x61160) +#define CHV_HDMID _MMIO(VLV_DISPLAY_BASE + 0x6116C) +#define PCH_SDVOB _MMIO(0xe1140) #define PCH_HDMIB PCH_SDVOB -#define PCH_HDMIC 0xe1150 -#define PCH_HDMID 0xe1160 +#define PCH_HDMIC _MMIO(0xe1150) +#define PCH_HDMID _MMIO(0xe1160) -#define PORT_DFT_I9XX 0x61150 +#define PORT_DFT_I9XX _MMIO(0x61150) #define DC_BALANCE_RESET (1 << 25) -#define PORT_DFT2_G4X (dev_priv->info.display_mmio_offset + 0x61154) +#define PORT_DFT2_G4X _MMIO(dev_priv->info.display_mmio_offset + 0x61154) #define DC_BALANCE_RESET_VLV (1 << 31) #define PIPE_SCRAMBLE_RESET_MASK ((1 << 14) | (0x3 << 0)) #define PIPE_C_SCRAMBLE_RESET (1 << 14) /* chv */ @@ -3370,9 +3419,12 @@ enum skl_disp_power_wells { /* DVO port control */ -#define DVOA 0x61120 -#define DVOB 0x61140 -#define DVOC 0x61160 +#define _DVOA 0x61120 +#define DVOA _MMIO(_DVOA) +#define _DVOB 0x61140 +#define DVOB _MMIO(_DVOB) +#define _DVOC 0x61160 +#define DVOC _MMIO(_DVOC) #define DVO_ENABLE (1 << 31) #define DVO_PIPE_B_SELECT (1 << 30) #define DVO_PIPE_STALL_UNUSED (0 << 28) @@ -3397,14 +3449,14 @@ enum skl_disp_power_wells { #define DVO_OUTPUT_CSTATE_PIXELS (1 << 1) /* SDG only */ #define DVO_OUTPUT_SOURCE_SIZE_PIXELS (1 << 0) /* SDG only */ #define DVO_PRESERVE_MASK (0x7<<24) -#define DVOA_SRCDIM 0x61124 -#define DVOB_SRCDIM 0x61144 -#define DVOC_SRCDIM 0x61164 +#define DVOA_SRCDIM _MMIO(0x61124) +#define DVOB_SRCDIM _MMIO(0x61144) +#define DVOC_SRCDIM _MMIO(0x61164) #define DVO_SRCDIM_HORIZONTAL_SHIFT 12 #define DVO_SRCDIM_VERTICAL_SHIFT 0 /* LVDS port control */ -#define LVDS 0x61180 +#define LVDS _MMIO(0x61180) /* * Enables the LVDS port. This bit must be set before DPLLs are enabled, as * the DPLL semantics change when the LVDS is assigned to that pipe. 
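Every conversion in the surrounding hunks follows the same recipe: a bare register offset (or an offset-computing helper such as _PIPE or _TRANSCODER2) is wrapped in _MMIO(), turning it into an opaque i915_reg_t instead of a plain integer. The definitions behind this live near the top of i915_reg.h and are not visible in this excerpt; a minimal self-contained sketch of the pattern (names follow the i915 type-safe register work this merge pulls in, details abbreviated) looks like this:

    #include <stdint.h>

    /* Wrapping the offset in a one-member struct gives registers their
     * own type: a raw integer can no longer be passed where an
     * i915_reg_t is expected, so any register missed by a conversion
     * like this one fails to compile instead of failing at runtime.
     */
    typedef struct {
    	uint32_t reg;
    } i915_reg_t;

    #define _MMIO(r) ((const i915_reg_t){ .reg = (r) })

    /* The offset is unwrapped only at the point of the MMIO access. */
    static inline uint32_t i915_mmio_reg_offset(i915_reg_t reg)
    {
    	return reg.reg;
    }

    /* Parameterized registers compute the offset first and wrap last,
     * e.g. the two-register per-pipe helper used throughout this file:
     */
    #define _PIPE(pipe, a, b)	((a) + (pipe) * ((b) - (a)))
    #define _MMIO_PIPE(pipe, a, b)	_MMIO(_PIPE(pipe, a, b))

Because i915_reg_t is passed by value and has no implicit conversion from integers, a call site still using a bare offset shows up as a compile error rather than a silent access to the wrong MMIO address.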
@@ -3454,13 +3506,13 @@ enum skl_disp_power_wells { #define LVDS_B0B3_POWER_UP (3 << 2) /* Video Data Island Packet control */ -#define VIDEO_DIP_DATA 0x61178 +#define VIDEO_DIP_DATA _MMIO(0x61178) /* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte * of the infoframe structure specified by CEA-861. */ #define VIDEO_DIP_DATA_SIZE 32 #define VIDEO_DIP_VSC_DATA_SIZE 36 -#define VIDEO_DIP_CTL 0x61170 +#define VIDEO_DIP_CTL _MMIO(0x61170) /* Pre HSW: */ #define VIDEO_DIP_ENABLE (1 << 31) #define VIDEO_DIP_PORT(port) ((port) << 29) @@ -3487,7 +3539,7 @@ enum skl_disp_power_wells { #define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0) /* Panel power sequencing */ -#define PP_STATUS 0x61200 +#define PP_STATUS _MMIO(0x61200) #define PP_ON (1 << 31) /* * Indicates that all dependencies of the panel are on: @@ -3513,14 +3565,14 @@ enum skl_disp_power_wells { #define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0) #define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0) #define PP_SEQUENCE_STATE_RESET (0xf << 0) -#define PP_CONTROL 0x61204 +#define PP_CONTROL _MMIO(0x61204) #define POWER_TARGET_ON (1 << 0) -#define PP_ON_DELAYS 0x61208 -#define PP_OFF_DELAYS 0x6120c -#define PP_DIVISOR 0x61210 +#define PP_ON_DELAYS _MMIO(0x61208) +#define PP_OFF_DELAYS _MMIO(0x6120c) +#define PP_DIVISOR _MMIO(0x61210) /* Panel fitting */ -#define PFIT_CONTROL (dev_priv->info.display_mmio_offset + 0x61230) +#define PFIT_CONTROL _MMIO(dev_priv->info.display_mmio_offset + 0x61230) #define PFIT_ENABLE (1 << 31) #define PFIT_PIPE_MASK (3 << 29) #define PFIT_PIPE_SHIFT 29 @@ -3538,7 +3590,7 @@ enum skl_disp_power_wells { #define PFIT_SCALING_PROGRAMMED (1 << 26) #define PFIT_SCALING_PILLAR (2 << 26) #define PFIT_SCALING_LETTER (3 << 26) -#define PFIT_PGM_RATIOS (dev_priv->info.display_mmio_offset + 0x61234) +#define PFIT_PGM_RATIOS _MMIO(dev_priv->info.display_mmio_offset + 0x61234) /* Pre-965 */ #define PFIT_VERT_SCALE_SHIFT 20 #define PFIT_VERT_SCALE_MASK 0xfff00000 @@ -3550,25 +3602,25 @@ enum skl_disp_power_wells { #define PFIT_HORIZ_SCALE_SHIFT_965 0 #define PFIT_HORIZ_SCALE_MASK_965 0x00001fff -#define PFIT_AUTO_RATIOS (dev_priv->info.display_mmio_offset + 0x61238) +#define PFIT_AUTO_RATIOS _MMIO(dev_priv->info.display_mmio_offset + 0x61238) #define _VLV_BLC_PWM_CTL2_A (dev_priv->info.display_mmio_offset + 0x61250) #define _VLV_BLC_PWM_CTL2_B (dev_priv->info.display_mmio_offset + 0x61350) -#define VLV_BLC_PWM_CTL2(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \ - _VLV_BLC_PWM_CTL2_B) +#define VLV_BLC_PWM_CTL2(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \ + _VLV_BLC_PWM_CTL2_B) #define _VLV_BLC_PWM_CTL_A (dev_priv->info.display_mmio_offset + 0x61254) #define _VLV_BLC_PWM_CTL_B (dev_priv->info.display_mmio_offset + 0x61354) -#define VLV_BLC_PWM_CTL(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL_A, \ - _VLV_BLC_PWM_CTL_B) +#define VLV_BLC_PWM_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL_A, \ + _VLV_BLC_PWM_CTL_B) #define _VLV_BLC_HIST_CTL_A (dev_priv->info.display_mmio_offset + 0x61260) #define _VLV_BLC_HIST_CTL_B (dev_priv->info.display_mmio_offset + 0x61360) -#define VLV_BLC_HIST_CTL(pipe) _PIPE(pipe, _VLV_BLC_HIST_CTL_A, \ - _VLV_BLC_HIST_CTL_B) +#define VLV_BLC_HIST_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_HIST_CTL_A, \ + _VLV_BLC_HIST_CTL_B) /* Backlight control */ -#define BLC_PWM_CTL2 (dev_priv->info.display_mmio_offset + 0x61250) /* 965+ only */ +#define BLC_PWM_CTL2 _MMIO(dev_priv->info.display_mmio_offset + 0x61250) /* 965+ only */ #define BLM_PWM_ENABLE (1 << 31) 
#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */ #define BLM_PIPE_SELECT (1 << 29) @@ -3591,7 +3643,7 @@ enum skl_disp_power_wells { #define BLM_PHASE_IN_COUNT_MASK (0xff << 8) #define BLM_PHASE_IN_INCR_SHIFT (0) #define BLM_PHASE_IN_INCR_MASK (0xff << 0) -#define BLC_PWM_CTL (dev_priv->info.display_mmio_offset + 0x61254) +#define BLC_PWM_CTL _MMIO(dev_priv->info.display_mmio_offset + 0x61254) /* * This is the most significant 15 bits of the number of backlight cycles in a * complete cycle of the modulated backlight control. @@ -3613,25 +3665,25 @@ enum skl_disp_power_wells { #define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe) #define BLM_POLARITY_PNV (1 << 0) /* pnv only */ -#define BLC_HIST_CTL (dev_priv->info.display_mmio_offset + 0x61260) +#define BLC_HIST_CTL _MMIO(dev_priv->info.display_mmio_offset + 0x61260) #define BLM_HISTOGRAM_ENABLE (1 << 31) /* New registers for PCH-split platforms. Safe where new bits show up, the * register layout machtes with gen4 BLC_PWM_CTL[12]. */ -#define BLC_PWM_CPU_CTL2 0x48250 -#define BLC_PWM_CPU_CTL 0x48254 +#define BLC_PWM_CPU_CTL2 _MMIO(0x48250) +#define BLC_PWM_CPU_CTL _MMIO(0x48254) -#define HSW_BLC_PWM2_CTL 0x48350 +#define HSW_BLC_PWM2_CTL _MMIO(0x48350) /* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */ -#define BLC_PWM_PCH_CTL1 0xc8250 +#define BLC_PWM_PCH_CTL1 _MMIO(0xc8250) #define BLM_PCH_PWM_ENABLE (1 << 31) #define BLM_PCH_OVERRIDE_ENABLE (1 << 30) #define BLM_PCH_POLARITY (1 << 29) -#define BLC_PWM_PCH_CTL2 0xc8254 +#define BLC_PWM_PCH_CTL2 _MMIO(0xc8254) -#define UTIL_PIN_CTL 0x48400 +#define UTIL_PIN_CTL _MMIO(0x48400) #define UTIL_PIN_ENABLE (1 << 31) #define UTIL_PIN_PIPE(x) ((x) << 29) @@ -3651,18 +3703,18 @@ enum skl_disp_power_wells { #define _BXT_BLC_PWM_FREQ2 0xC8354 #define _BXT_BLC_PWM_DUTY2 0xC8358 -#define BXT_BLC_PWM_CTL(controller) _PIPE(controller, \ +#define BXT_BLC_PWM_CTL(controller) _MMIO_PIPE(controller, \ _BXT_BLC_PWM_CTL1, _BXT_BLC_PWM_CTL2) -#define BXT_BLC_PWM_FREQ(controller) _PIPE(controller, \ +#define BXT_BLC_PWM_FREQ(controller) _MMIO_PIPE(controller, \ _BXT_BLC_PWM_FREQ1, _BXT_BLC_PWM_FREQ2) -#define BXT_BLC_PWM_DUTY(controller) _PIPE(controller, \ +#define BXT_BLC_PWM_DUTY(controller) _MMIO_PIPE(controller, \ _BXT_BLC_PWM_DUTY1, _BXT_BLC_PWM_DUTY2) -#define PCH_GTC_CTL 0xe7000 +#define PCH_GTC_CTL _MMIO(0xe7000) #define PCH_GTC_ENABLE (1 << 31) /* TV port control */ -#define TV_CTL 0x68000 +#define TV_CTL _MMIO(0x68000) /* Enables the TV encoder */ # define TV_ENC_ENABLE (1 << 31) /* Sources the TV encoder input from pipe B instead of A. */ @@ -3729,7 +3781,7 @@ enum skl_disp_power_wells { # define TV_TEST_MODE_MONITOR_DETECT (7 << 0) # define TV_TEST_MODE_MASK (7 << 0) -#define TV_DAC 0x68004 +#define TV_DAC _MMIO(0x68004) # define TV_DAC_SAVE 0x00ffff00 /* * Reports that DAC state change logic has reported change (RO). @@ -3780,13 +3832,13 @@ enum skl_disp_power_wells { * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with * -1 (0x3) being the only legal negative value. 
*/ -#define TV_CSC_Y 0x68010 +#define TV_CSC_Y _MMIO(0x68010) # define TV_RY_MASK 0x07ff0000 # define TV_RY_SHIFT 16 # define TV_GY_MASK 0x00000fff # define TV_GY_SHIFT 0 -#define TV_CSC_Y2 0x68014 +#define TV_CSC_Y2 _MMIO(0x68014) # define TV_BY_MASK 0x07ff0000 # define TV_BY_SHIFT 16 /* @@ -3797,13 +3849,13 @@ enum skl_disp_power_wells { # define TV_AY_MASK 0x000003ff # define TV_AY_SHIFT 0 -#define TV_CSC_U 0x68018 +#define TV_CSC_U _MMIO(0x68018) # define TV_RU_MASK 0x07ff0000 # define TV_RU_SHIFT 16 # define TV_GU_MASK 0x000007ff # define TV_GU_SHIFT 0 -#define TV_CSC_U2 0x6801c +#define TV_CSC_U2 _MMIO(0x6801c) # define TV_BU_MASK 0x07ff0000 # define TV_BU_SHIFT 16 /* @@ -3814,13 +3866,13 @@ enum skl_disp_power_wells { # define TV_AU_MASK 0x000003ff # define TV_AU_SHIFT 0 -#define TV_CSC_V 0x68020 +#define TV_CSC_V _MMIO(0x68020) # define TV_RV_MASK 0x0fff0000 # define TV_RV_SHIFT 16 # define TV_GV_MASK 0x000007ff # define TV_GV_SHIFT 0 -#define TV_CSC_V2 0x68024 +#define TV_CSC_V2 _MMIO(0x68024) # define TV_BV_MASK 0x07ff0000 # define TV_BV_SHIFT 16 /* @@ -3831,7 +3883,7 @@ enum skl_disp_power_wells { # define TV_AV_MASK 0x000007ff # define TV_AV_SHIFT 0 -#define TV_CLR_KNOBS 0x68028 +#define TV_CLR_KNOBS _MMIO(0x68028) /* 2s-complement brightness adjustment */ # define TV_BRIGHTNESS_MASK 0xff000000 # define TV_BRIGHTNESS_SHIFT 24 @@ -3845,7 +3897,7 @@ enum skl_disp_power_wells { # define TV_HUE_MASK 0x000000ff # define TV_HUE_SHIFT 0 -#define TV_CLR_LEVEL 0x6802c +#define TV_CLR_LEVEL _MMIO(0x6802c) /* Controls the DAC level for black */ # define TV_BLACK_LEVEL_MASK 0x01ff0000 # define TV_BLACK_LEVEL_SHIFT 16 @@ -3853,7 +3905,7 @@ enum skl_disp_power_wells { # define TV_BLANK_LEVEL_MASK 0x000001ff # define TV_BLANK_LEVEL_SHIFT 0 -#define TV_H_CTL_1 0x68030 +#define TV_H_CTL_1 _MMIO(0x68030) /* Number of pixels in the hsync. */ # define TV_HSYNC_END_MASK 0x1fff0000 # define TV_HSYNC_END_SHIFT 16 @@ -3861,7 +3913,7 @@ enum skl_disp_power_wells { # define TV_HTOTAL_MASK 0x00001fff # define TV_HTOTAL_SHIFT 0 -#define TV_H_CTL_2 0x68034 +#define TV_H_CTL_2 _MMIO(0x68034) /* Enables the colorburst (needed for non-component color) */ # define TV_BURST_ENA (1 << 31) /* Offset of the colorburst from the start of hsync, in pixels minus one. 
*/ @@ -3871,7 +3923,7 @@ enum skl_disp_power_wells { # define TV_HBURST_LEN_SHIFT 0 # define TV_HBURST_LEN_MASK 0x0001fff -#define TV_H_CTL_3 0x68038 +#define TV_H_CTL_3 _MMIO(0x68038) /* End of hblank, measured in pixels minus one from start of hsync */ # define TV_HBLANK_END_SHIFT 16 # define TV_HBLANK_END_MASK 0x1fff0000 @@ -3879,7 +3931,7 @@ enum skl_disp_power_wells { # define TV_HBLANK_START_SHIFT 0 # define TV_HBLANK_START_MASK 0x0001fff -#define TV_V_CTL_1 0x6803c +#define TV_V_CTL_1 _MMIO(0x6803c) /* XXX */ # define TV_NBR_END_SHIFT 16 # define TV_NBR_END_MASK 0x07ff0000 @@ -3890,7 +3942,7 @@ enum skl_disp_power_wells { # define TV_VI_END_F2_SHIFT 0 # define TV_VI_END_F2_MASK 0x0000003f -#define TV_V_CTL_2 0x68040 +#define TV_V_CTL_2 _MMIO(0x68040) /* Length of vsync, in half lines */ # define TV_VSYNC_LEN_MASK 0x07ff0000 # define TV_VSYNC_LEN_SHIFT 16 @@ -3906,7 +3958,7 @@ enum skl_disp_power_wells { # define TV_VSYNC_START_F2_MASK 0x0000007f # define TV_VSYNC_START_F2_SHIFT 0 -#define TV_V_CTL_3 0x68044 +#define TV_V_CTL_3 _MMIO(0x68044) /* Enables generation of the equalization signal */ # define TV_EQUAL_ENA (1 << 31) /* Length of vsync, in half lines */ @@ -3924,7 +3976,7 @@ enum skl_disp_power_wells { # define TV_VEQ_START_F2_MASK 0x000007f # define TV_VEQ_START_F2_SHIFT 0 -#define TV_V_CTL_4 0x68048 +#define TV_V_CTL_4 _MMIO(0x68048) /* * Offset to start of vertical colorburst, measured in one less than the * number of lines from vertical start. @@ -3938,7 +3990,7 @@ enum skl_disp_power_wells { # define TV_VBURST_END_F1_MASK 0x000000ff # define TV_VBURST_END_F1_SHIFT 0 -#define TV_V_CTL_5 0x6804c +#define TV_V_CTL_5 _MMIO(0x6804c) /* * Offset to start of vertical colorburst, measured in one less than the * number of lines from vertical start. @@ -3952,7 +4004,7 @@ enum skl_disp_power_wells { # define TV_VBURST_END_F2_MASK 0x000000ff # define TV_VBURST_END_F2_SHIFT 0 -#define TV_V_CTL_6 0x68050 +#define TV_V_CTL_6 _MMIO(0x68050) /* * Offset to start of vertical colorburst, measured in one less than the * number of lines from vertical start. @@ -3966,7 +4018,7 @@ enum skl_disp_power_wells { # define TV_VBURST_END_F3_MASK 0x000000ff # define TV_VBURST_END_F3_SHIFT 0 -#define TV_V_CTL_7 0x68054 +#define TV_V_CTL_7 _MMIO(0x68054) /* * Offset to start of vertical colorburst, measured in one less than the * number of lines from vertical start. 
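The TV_V_CTL_4 through TV_V_CTL_7 colorburst fields above all share the "one less than the number of lines" convention: a burst that starts N lines after vertical start is programmed as N - 1. A small sketch of the field-1 encoding follows; the START_F1 mask and shift are assumed counterparts of the END_F1 pair visible above, with hypothetical values for illustration only:

    #include <stdint.h>

    /* Assumed field-1 layout of TV_V_CTL_4; the END pair appears in
     * the hunk above, the START pair is an assumed counterpart with
     * illustrative values.
     */
    #define TV_VBURST_START_F1_MASK	0x000f0000
    #define TV_VBURST_START_F1_SHIFT	16
    #define TV_VBURST_END_F1_MASK	0x000000ff
    #define TV_VBURST_END_F1_SHIFT	0

    /* Encode a field-1 vertical colorburst running from 'start' to
     * 'end' lines after vertical start; the hardware expects both
     * values minus one.
     */
    static inline uint32_t tv_vburst_f1(uint32_t start, uint32_t end)
    {
    	uint32_t val = 0;

    	val |= ((start - 1) << TV_VBURST_START_F1_SHIFT) &
    	       TV_VBURST_START_F1_MASK;
    	val |= ((end - 1) << TV_VBURST_END_F1_SHIFT) &
    	       TV_VBURST_END_F1_MASK;
    	return val; /* value for the TV_V_CTL_4 write */
    }

A burst spanning lines 7 through 14, for example, is programmed as 6 and 13.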
@@ -3980,7 +4032,7 @@ enum skl_disp_power_wells { # define TV_VBURST_END_F4_MASK 0x000000ff # define TV_VBURST_END_F4_SHIFT 0 -#define TV_SC_CTL_1 0x68060 +#define TV_SC_CTL_1 _MMIO(0x68060) /* Turns on the first subcarrier phase generation DDA */ # define TV_SC_DDA1_EN (1 << 31) /* Turns on the first subcarrier phase generation DDA */ @@ -4002,7 +4054,7 @@ enum skl_disp_power_wells { # define TV_SCDDA1_INC_MASK 0x00000fff # define TV_SCDDA1_INC_SHIFT 0 -#define TV_SC_CTL_2 0x68064 +#define TV_SC_CTL_2 _MMIO(0x68064) /* Sets the rollover for the second subcarrier phase generation DDA */ # define TV_SCDDA2_SIZE_MASK 0x7fff0000 # define TV_SCDDA2_SIZE_SHIFT 16 @@ -4010,7 +4062,7 @@ enum skl_disp_power_wells { # define TV_SCDDA2_INC_MASK 0x00007fff # define TV_SCDDA2_INC_SHIFT 0 -#define TV_SC_CTL_3 0x68068 +#define TV_SC_CTL_3 _MMIO(0x68068) /* Sets the rollover for the third subcarrier phase generation DDA */ # define TV_SCDDA3_SIZE_MASK 0x7fff0000 # define TV_SCDDA3_SIZE_SHIFT 16 @@ -4018,7 +4070,7 @@ enum skl_disp_power_wells { # define TV_SCDDA3_INC_MASK 0x00007fff # define TV_SCDDA3_INC_SHIFT 0 -#define TV_WIN_POS 0x68070 +#define TV_WIN_POS _MMIO(0x68070) /* X coordinate of the display from the start of horizontal active */ # define TV_XPOS_MASK 0x1fff0000 # define TV_XPOS_SHIFT 16 @@ -4026,7 +4078,7 @@ enum skl_disp_power_wells { # define TV_YPOS_MASK 0x00000fff # define TV_YPOS_SHIFT 0 -#define TV_WIN_SIZE 0x68074 +#define TV_WIN_SIZE _MMIO(0x68074) /* Horizontal size of the display window, measured in pixels*/ # define TV_XSIZE_MASK 0x1fff0000 # define TV_XSIZE_SHIFT 16 @@ -4038,7 +4090,7 @@ enum skl_disp_power_wells { # define TV_YSIZE_MASK 0x00000fff # define TV_YSIZE_SHIFT 0 -#define TV_FILTER_CTL_1 0x68080 +#define TV_FILTER_CTL_1 _MMIO(0x68080) /* * Enables automatic scaling calculation. * @@ -4071,7 +4123,7 @@ enum skl_disp_power_wells { # define TV_HSCALE_FRAC_MASK 0x00003fff # define TV_HSCALE_FRAC_SHIFT 0 -#define TV_FILTER_CTL_2 0x68084 +#define TV_FILTER_CTL_2 _MMIO(0x68084) /* * Sets the integer part of the 3.15 fixed-point vertical scaling factor. * @@ -4087,7 +4139,7 @@ enum skl_disp_power_wells { # define TV_VSCALE_FRAC_MASK 0x00007fff # define TV_VSCALE_FRAC_SHIFT 0 -#define TV_FILTER_CTL_3 0x68088 +#define TV_FILTER_CTL_3 _MMIO(0x68088) /* * Sets the integer part of the 3.15 fixed-point vertical scaling factor. * @@ -4107,7 +4159,7 @@ enum skl_disp_power_wells { # define TV_VSCALE_IP_FRAC_MASK 0x00007fff # define TV_VSCALE_IP_FRAC_SHIFT 0 -#define TV_CC_CONTROL 0x68090 +#define TV_CC_CONTROL _MMIO(0x68090) # define TV_CC_ENABLE (1 << 31) /* * Specifies which field to send the CC data in. @@ -4123,7 +4175,7 @@ enum skl_disp_power_wells { # define TV_CC_LINE_MASK 0x0000003f # define TV_CC_LINE_SHIFT 0 -#define TV_CC_DATA 0x68094 +#define TV_CC_DATA _MMIO(0x68094) # define TV_CC_RDY (1 << 31) /* Second word of CC data to be transmitted. 
*/ # define TV_CC_DATA_2_MASK 0x007f0000 @@ -4132,20 +4184,20 @@ enum skl_disp_power_wells { # define TV_CC_DATA_1_MASK 0x0000007f # define TV_CC_DATA_1_SHIFT 0 -#define TV_H_LUMA(i) (0x68100 + (i) * 4) /* 60 registers */ -#define TV_H_CHROMA(i) (0x68200 + (i) * 4) /* 60 registers */ -#define TV_V_LUMA(i) (0x68300 + (i) * 4) /* 43 registers */ -#define TV_V_CHROMA(i) (0x68400 + (i) * 4) /* 43 registers */ +#define TV_H_LUMA(i) _MMIO(0x68100 + (i) * 4) /* 60 registers */ +#define TV_H_CHROMA(i) _MMIO(0x68200 + (i) * 4) /* 60 registers */ +#define TV_V_LUMA(i) _MMIO(0x68300 + (i) * 4) /* 43 registers */ +#define TV_V_CHROMA(i) _MMIO(0x68400 + (i) * 4) /* 43 registers */ /* Display Port */ -#define DP_A 0x64000 /* eDP */ -#define DP_B 0x64100 -#define DP_C 0x64200 -#define DP_D 0x64300 +#define DP_A _MMIO(0x64000) /* eDP */ +#define DP_B _MMIO(0x64100) +#define DP_C _MMIO(0x64200) +#define DP_D _MMIO(0x64300) -#define VLV_DP_B (VLV_DISPLAY_BASE + DP_B) -#define VLV_DP_C (VLV_DISPLAY_BASE + DP_C) -#define CHV_DP_D (VLV_DISPLAY_BASE + DP_D) +#define VLV_DP_B _MMIO(VLV_DISPLAY_BASE + 0x64100) +#define VLV_DP_C _MMIO(VLV_DISPLAY_BASE + 0x64200) +#define CHV_DP_D _MMIO(VLV_DISPLAY_BASE + 0x64300) #define DP_PORT_EN (1 << 31) #define DP_PIPEB_SELECT (1 << 30) @@ -4199,7 +4251,7 @@ enum skl_disp_power_wells { /* eDP */ #define DP_PLL_FREQ_270MHZ (0 << 16) -#define DP_PLL_FREQ_160MHZ (1 << 16) +#define DP_PLL_FREQ_162MHZ (1 << 16) #define DP_PLL_FREQ_MASK (3 << 16) /* locked once port is enabled */ @@ -4232,33 +4284,36 @@ enum skl_disp_power_wells { * is 20 bytes in each direction, hence the 5 fixed * data registers */ -#define DPA_AUX_CH_CTL 0x64010 -#define DPA_AUX_CH_DATA1 0x64014 -#define DPA_AUX_CH_DATA2 0x64018 -#define DPA_AUX_CH_DATA3 0x6401c -#define DPA_AUX_CH_DATA4 0x64020 -#define DPA_AUX_CH_DATA5 0x64024 - -#define DPB_AUX_CH_CTL 0x64110 -#define DPB_AUX_CH_DATA1 0x64114 -#define DPB_AUX_CH_DATA2 0x64118 -#define DPB_AUX_CH_DATA3 0x6411c -#define DPB_AUX_CH_DATA4 0x64120 -#define DPB_AUX_CH_DATA5 0x64124 - -#define DPC_AUX_CH_CTL 0x64210 -#define DPC_AUX_CH_DATA1 0x64214 -#define DPC_AUX_CH_DATA2 0x64218 -#define DPC_AUX_CH_DATA3 0x6421c -#define DPC_AUX_CH_DATA4 0x64220 -#define DPC_AUX_CH_DATA5 0x64224 - -#define DPD_AUX_CH_CTL 0x64310 -#define DPD_AUX_CH_DATA1 0x64314 -#define DPD_AUX_CH_DATA2 0x64318 -#define DPD_AUX_CH_DATA3 0x6431c -#define DPD_AUX_CH_DATA4 0x64320 -#define DPD_AUX_CH_DATA5 0x64324 +#define _DPA_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64010) +#define _DPA_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64014) +#define _DPA_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64018) +#define _DPA_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6401c) +#define _DPA_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64020) +#define _DPA_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64024) + +#define _DPB_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64110) +#define _DPB_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64114) +#define _DPB_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64118) +#define _DPB_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6411c) +#define _DPB_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64120) +#define _DPB_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64124) + +#define _DPC_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64210) +#define _DPC_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64214) +#define _DPC_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 
0x64218) +#define _DPC_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6421c) +#define _DPC_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64220) +#define _DPC_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64224) + +#define _DPD_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64310) +#define _DPD_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64314) +#define _DPD_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64318) +#define _DPD_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6431c) +#define _DPD_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64320) +#define _DPD_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64324) + +#define DP_AUX_CH_CTL(port) _MMIO_PORT(port, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL) +#define DP_AUX_CH_DATA(port, i) _MMIO(_PORT(port, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */ #define DP_AUX_CH_CTL_SEND_BUSY (1 << 31) #define DP_AUX_CH_CTL_DONE (1 << 30) @@ -4335,10 +4390,10 @@ enum skl_disp_power_wells { #define _PIPEB_LINK_N_G4X 0x71064 #define PIPEA_DP_LINK_N_MASK (0xffffff) -#define PIPE_DATA_M_G4X(pipe) _PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X) -#define PIPE_DATA_N_G4X(pipe) _PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X) -#define PIPE_LINK_M_G4X(pipe) _PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X) -#define PIPE_LINK_N_G4X(pipe) _PIPE(pipe, _PIPEA_LINK_N_G4X, _PIPEB_LINK_N_G4X) +#define PIPE_DATA_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X) +#define PIPE_DATA_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X) +#define PIPE_LINK_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X) +#define PIPE_LINK_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_N_G4X, _PIPEB_LINK_N_G4X) /* Display & cursor control */ @@ -4454,15 +4509,15 @@ enum skl_disp_power_wells { */ #define PIPE_EDP_OFFSET 0x7f000 -#define _PIPE2(pipe, reg) (dev_priv->info.pipe_offsets[pipe] - \ +#define _MMIO_PIPE2(pipe, reg) _MMIO(dev_priv->info.pipe_offsets[pipe] - \ dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \ dev_priv->info.display_mmio_offset) -#define PIPECONF(pipe) _PIPE2(pipe, _PIPEACONF) -#define PIPEDSL(pipe) _PIPE2(pipe, _PIPEADSL) -#define PIPEFRAME(pipe) _PIPE2(pipe, _PIPEAFRAMEHIGH) -#define PIPEFRAMEPIXEL(pipe) _PIPE2(pipe, _PIPEAFRAMEPIXEL) -#define PIPESTAT(pipe) _PIPE2(pipe, _PIPEASTAT) +#define PIPECONF(pipe) _MMIO_PIPE2(pipe, _PIPEACONF) +#define PIPEDSL(pipe) _MMIO_PIPE2(pipe, _PIPEADSL) +#define PIPEFRAME(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEHIGH) +#define PIPEFRAMEPIXEL(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEPIXEL) +#define PIPESTAT(pipe) _MMIO_PIPE2(pipe, _PIPEASTAT) #define _PIPE_MISC_A 0x70030 #define _PIPE_MISC_B 0x71030 @@ -4474,9 +4529,9 @@ enum skl_disp_power_wells { #define PIPEMISC_DITHER_ENABLE (1<<4) #define PIPEMISC_DITHER_TYPE_MASK (3<<2) #define PIPEMISC_DITHER_TYPE_SP (0<<2) -#define PIPEMISC(pipe) _PIPE2(pipe, _PIPE_MISC_A) +#define PIPEMISC(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC_A) -#define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028) +#define VLV_DPFLIPSTAT _MMIO(VLV_DISPLAY_BASE + 0x70028) #define PIPEB_LINE_COMPARE_INT_EN (1<<29) #define PIPEB_HLINE_INT_EN (1<<28) #define PIPEB_VBLANK_INT_EN (1<<27) @@ -4497,7 +4552,7 @@ enum skl_disp_power_wells { #define SPRITEE_FLIPDONE_INT_EN (1<<9) #define PLANEC_FLIPDONE_INT_EN (1<<8) -#define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */ +#define DPINVGTT _MMIO(VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */ #define SPRITEF_INVALID_GTT_INT_EN (1<<27) #define SPRITEE_INVALID_GTT_INT_EN (1<<26) #define 
PLANEC_INVALID_GTT_INT_EN (1<<25) @@ -4527,7 +4582,7 @@ enum skl_disp_power_wells { #define DPINVGTT_STATUS_MASK 0xff #define DPINVGTT_STATUS_MASK_CHV 0xfff -#define DSPARB (dev_priv->info.display_mmio_offset + 0x70030) +#define DSPARB _MMIO(dev_priv->info.display_mmio_offset + 0x70030) #define DSPARB_CSTART_MASK (0x7f << 7) #define DSPARB_CSTART_SHIFT 7 #define DSPARB_BSTART_MASK (0x7f) @@ -4542,7 +4597,7 @@ enum skl_disp_power_wells { #define DSPARB_SPRITEC_MASK_VLV (0xff << 16) #define DSPARB_SPRITED_SHIFT_VLV 24 #define DSPARB_SPRITED_MASK_VLV (0xff << 24) -#define DSPARB2 (VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */ +#define DSPARB2 _MMIO(VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */ #define DSPARB_SPRITEA_HI_SHIFT_VLV 0 #define DSPARB_SPRITEA_HI_MASK_VLV (0x1 << 0) #define DSPARB_SPRITEB_HI_SHIFT_VLV 4 @@ -4555,14 +4610,14 @@ enum skl_disp_power_wells { #define DSPARB_SPRITEE_HI_MASK_VLV (0x1 << 16) #define DSPARB_SPRITEF_HI_SHIFT_VLV 20 #define DSPARB_SPRITEF_HI_MASK_VLV (0x1 << 20) -#define DSPARB3 (VLV_DISPLAY_BASE + 0x7006c) /* chv */ +#define DSPARB3 _MMIO(VLV_DISPLAY_BASE + 0x7006c) /* chv */ #define DSPARB_SPRITEE_SHIFT_VLV 0 #define DSPARB_SPRITEE_MASK_VLV (0xff << 0) #define DSPARB_SPRITEF_SHIFT_VLV 8 #define DSPARB_SPRITEF_MASK_VLV (0xff << 8) /* pnv/gen4/g4x/vlv/chv */ -#define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034) +#define DSPFW1 _MMIO(dev_priv->info.display_mmio_offset + 0x70034) #define DSPFW_SR_SHIFT 23 #define DSPFW_SR_MASK (0x1ff<<23) #define DSPFW_CURSORB_SHIFT 16 @@ -4573,7 +4628,7 @@ enum skl_disp_power_wells { #define DSPFW_PLANEA_SHIFT 0 #define DSPFW_PLANEA_MASK (0x7f<<0) #define DSPFW_PLANEA_MASK_VLV (0xff<<0) /* vlv/chv */ -#define DSPFW2 (dev_priv->info.display_mmio_offset + 0x70038) +#define DSPFW2 _MMIO(dev_priv->info.display_mmio_offset + 0x70038) #define DSPFW_FBC_SR_EN (1<<31) /* g4x */ #define DSPFW_FBC_SR_SHIFT 28 #define DSPFW_FBC_SR_MASK (0x7<<28) /* g4x */ @@ -4589,7 +4644,7 @@ enum skl_disp_power_wells { #define DSPFW_SPRITEA_SHIFT 0 #define DSPFW_SPRITEA_MASK (0x7f<<0) /* g4x */ #define DSPFW_SPRITEA_MASK_VLV (0xff<<0) /* vlv/chv */ -#define DSPFW3 (dev_priv->info.display_mmio_offset + 0x7003c) +#define DSPFW3 _MMIO(dev_priv->info.display_mmio_offset + 0x7003c) #define DSPFW_HPLL_SR_EN (1<<31) #define PINEVIEW_SELF_REFRESH_EN (1<<30) #define DSPFW_CURSOR_SR_SHIFT 24 @@ -4600,14 +4655,14 @@ enum skl_disp_power_wells { #define DSPFW_HPLL_SR_MASK (0x1ff<<0) /* vlv/chv */ -#define DSPFW4 (VLV_DISPLAY_BASE + 0x70070) +#define DSPFW4 _MMIO(VLV_DISPLAY_BASE + 0x70070) #define DSPFW_SPRITEB_WM1_SHIFT 16 #define DSPFW_SPRITEB_WM1_MASK (0xff<<16) #define DSPFW_CURSORA_WM1_SHIFT 8 #define DSPFW_CURSORA_WM1_MASK (0x3f<<8) #define DSPFW_SPRITEA_WM1_SHIFT 0 #define DSPFW_SPRITEA_WM1_MASK (0xff<<0) -#define DSPFW5 (VLV_DISPLAY_BASE + 0x70074) +#define DSPFW5 _MMIO(VLV_DISPLAY_BASE + 0x70074) #define DSPFW_PLANEB_WM1_SHIFT 24 #define DSPFW_PLANEB_WM1_MASK (0xff<<24) #define DSPFW_PLANEA_WM1_SHIFT 16 @@ -4616,11 +4671,11 @@ enum skl_disp_power_wells { #define DSPFW_CURSORB_WM1_MASK (0x3f<<8) #define DSPFW_CURSOR_SR_WM1_SHIFT 0 #define DSPFW_CURSOR_SR_WM1_MASK (0x3f<<0) -#define DSPFW6 (VLV_DISPLAY_BASE + 0x70078) +#define DSPFW6 _MMIO(VLV_DISPLAY_BASE + 0x70078) #define DSPFW_SR_WM1_SHIFT 0 #define DSPFW_SR_WM1_MASK (0x1ff<<0) -#define DSPFW7 (VLV_DISPLAY_BASE + 0x7007c) -#define DSPFW7_CHV (VLV_DISPLAY_BASE + 0x700b4) /* wtf #1? */ +#define DSPFW7 _MMIO(VLV_DISPLAY_BASE + 0x7007c) +#define DSPFW7_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b4) /* wtf #1? 
*/ #define DSPFW_SPRITED_WM1_SHIFT 24 #define DSPFW_SPRITED_WM1_MASK (0xff<<24) #define DSPFW_SPRITED_SHIFT 16 @@ -4629,7 +4684,7 @@ enum skl_disp_power_wells { #define DSPFW_SPRITEC_WM1_MASK (0xff<<8) #define DSPFW_SPRITEC_SHIFT 0 #define DSPFW_SPRITEC_MASK_VLV (0xff<<0) -#define DSPFW8_CHV (VLV_DISPLAY_BASE + 0x700b8) +#define DSPFW8_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b8) #define DSPFW_SPRITEF_WM1_SHIFT 24 #define DSPFW_SPRITEF_WM1_MASK (0xff<<24) #define DSPFW_SPRITEF_SHIFT 16 @@ -4638,7 +4693,7 @@ enum skl_disp_power_wells { #define DSPFW_SPRITEE_WM1_MASK (0xff<<8) #define DSPFW_SPRITEE_SHIFT 0 #define DSPFW_SPRITEE_MASK_VLV (0xff<<0) -#define DSPFW9_CHV (VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */ +#define DSPFW9_CHV _MMIO(VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */ #define DSPFW_PLANEC_WM1_SHIFT 24 #define DSPFW_PLANEC_WM1_MASK (0xff<<24) #define DSPFW_PLANEC_SHIFT 16 @@ -4649,7 +4704,7 @@ enum skl_disp_power_wells { #define DSPFW_CURSORC_MASK (0x3f<<0) /* vlv/chv high order bits */ -#define DSPHOWM (VLV_DISPLAY_BASE + 0x70064) +#define DSPHOWM _MMIO(VLV_DISPLAY_BASE + 0x70064) #define DSPFW_SR_HI_SHIFT 24 #define DSPFW_SR_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */ #define DSPFW_SPRITEF_HI_SHIFT 23 @@ -4670,7 +4725,7 @@ enum skl_disp_power_wells { #define DSPFW_SPRITEA_HI_MASK (1<<4) #define DSPFW_PLANEA_HI_SHIFT 0 #define DSPFW_PLANEA_HI_MASK (1<<0) -#define DSPHOWM1 (VLV_DISPLAY_BASE + 0x70068) +#define DSPHOWM1 _MMIO(VLV_DISPLAY_BASE + 0x70068) #define DSPFW_SR_WM1_HI_SHIFT 24 #define DSPFW_SR_WM1_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */ #define DSPFW_SPRITEF_WM1_HI_SHIFT 23 @@ -4693,7 +4748,7 @@ enum skl_disp_power_wells { #define DSPFW_PLANEA_WM1_HI_MASK (1<<0) /* drain latency register values*/ -#define VLV_DDL(pipe) (VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe)) +#define VLV_DDL(pipe) _MMIO(VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe)) #define DDL_CURSOR_SHIFT 24 #define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite)) #define DDL_PLANE_SHIFT 0 @@ -4701,7 +4756,7 @@ enum skl_disp_power_wells { #define DDL_PRECISION_LOW (0<<7) #define DRAIN_LATENCY_MASK 0x7f -#define CBR1_VLV (VLV_DISPLAY_BASE + 0x70400) +#define CBR1_VLV _MMIO(VLV_DISPLAY_BASE + 0x70400) #define CBR_PND_DEADLINE_DISABLE (1<<31) #define CBR_PWM_CLOCK_MUX_SELECT (1<<30) @@ -4739,51 +4794,51 @@ enum skl_disp_power_wells { #define I965_CURSOR_DFT_WM 8 /* Watermark register definitions for SKL */ -#define CUR_WM_A_0 0x70140 -#define CUR_WM_B_0 0x71140 -#define PLANE_WM_1_A_0 0x70240 -#define PLANE_WM_1_B_0 0x71240 -#define PLANE_WM_2_A_0 0x70340 -#define PLANE_WM_2_B_0 0x71340 -#define PLANE_WM_TRANS_1_A_0 0x70268 -#define PLANE_WM_TRANS_1_B_0 0x71268 -#define PLANE_WM_TRANS_2_A_0 0x70368 -#define PLANE_WM_TRANS_2_B_0 0x71368 -#define CUR_WM_TRANS_A_0 0x70168 -#define CUR_WM_TRANS_B_0 0x71168 +#define _CUR_WM_A_0 0x70140 +#define _CUR_WM_B_0 0x71140 +#define _PLANE_WM_1_A_0 0x70240 +#define _PLANE_WM_1_B_0 0x71240 +#define _PLANE_WM_2_A_0 0x70340 +#define _PLANE_WM_2_B_0 0x71340 +#define _PLANE_WM_TRANS_1_A_0 0x70268 +#define _PLANE_WM_TRANS_1_B_0 0x71268 +#define _PLANE_WM_TRANS_2_A_0 0x70368 +#define _PLANE_WM_TRANS_2_B_0 0x71368 +#define _CUR_WM_TRANS_A_0 0x70168 +#define _CUR_WM_TRANS_B_0 0x71168 #define PLANE_WM_EN (1 << 31) #define PLANE_WM_LINES_SHIFT 14 #define PLANE_WM_LINES_MASK 0x1f #define PLANE_WM_BLOCKS_MASK 0x3ff -#define CUR_WM_0(pipe) _PIPE(pipe, CUR_WM_A_0, CUR_WM_B_0) -#define CUR_WM(pipe, level) (CUR_WM_0(pipe) + ((4) * (level))) -#define CUR_WM_TRANS(pipe) _PIPE(pipe, CUR_WM_TRANS_A_0, CUR_WM_TRANS_B_0) 
+#define _CUR_WM_0(pipe) _PIPE(pipe, _CUR_WM_A_0, _CUR_WM_B_0) +#define CUR_WM(pipe, level) _MMIO(_CUR_WM_0(pipe) + ((4) * (level))) +#define CUR_WM_TRANS(pipe) _MMIO_PIPE(pipe, _CUR_WM_TRANS_A_0, _CUR_WM_TRANS_B_0) -#define _PLANE_WM_1(pipe) _PIPE(pipe, PLANE_WM_1_A_0, PLANE_WM_1_B_0) -#define _PLANE_WM_2(pipe) _PIPE(pipe, PLANE_WM_2_A_0, PLANE_WM_2_B_0) +#define _PLANE_WM_1(pipe) _PIPE(pipe, _PLANE_WM_1_A_0, _PLANE_WM_1_B_0) +#define _PLANE_WM_2(pipe) _PIPE(pipe, _PLANE_WM_2_A_0, _PLANE_WM_2_B_0) #define _PLANE_WM_BASE(pipe, plane) \ _PLANE(plane, _PLANE_WM_1(pipe), _PLANE_WM_2(pipe)) #define PLANE_WM(pipe, plane, level) \ - (_PLANE_WM_BASE(pipe, plane) + ((4) * (level))) + _MMIO(_PLANE_WM_BASE(pipe, plane) + ((4) * (level))) #define _PLANE_WM_TRANS_1(pipe) \ - _PIPE(pipe, PLANE_WM_TRANS_1_A_0, PLANE_WM_TRANS_1_B_0) + _PIPE(pipe, _PLANE_WM_TRANS_1_A_0, _PLANE_WM_TRANS_1_B_0) #define _PLANE_WM_TRANS_2(pipe) \ - _PIPE(pipe, PLANE_WM_TRANS_2_A_0, PLANE_WM_TRANS_2_B_0) + _PIPE(pipe, _PLANE_WM_TRANS_2_A_0, _PLANE_WM_TRANS_2_B_0) #define PLANE_WM_TRANS(pipe, plane) \ - _PLANE(plane, _PLANE_WM_TRANS_1(pipe), _PLANE_WM_TRANS_2(pipe)) + _MMIO(_PLANE(plane, _PLANE_WM_TRANS_1(pipe), _PLANE_WM_TRANS_2(pipe))) /* define the Watermark register on Ironlake */ -#define WM0_PIPEA_ILK 0x45100 +#define WM0_PIPEA_ILK _MMIO(0x45100) #define WM0_PIPE_PLANE_MASK (0xffff<<16) #define WM0_PIPE_PLANE_SHIFT 16 #define WM0_PIPE_SPRITE_MASK (0xff<<8) #define WM0_PIPE_SPRITE_SHIFT 8 #define WM0_PIPE_CURSOR_MASK (0xff) -#define WM0_PIPEB_ILK 0x45104 -#define WM0_PIPEC_IVB 0x45200 -#define WM1_LP_ILK 0x45108 +#define WM0_PIPEB_ILK _MMIO(0x45104) +#define WM0_PIPEC_IVB _MMIO(0x45200) +#define WM1_LP_ILK _MMIO(0x45108) #define WM1_LP_SR_EN (1<<31) #define WM1_LP_LATENCY_SHIFT 24 #define WM1_LP_LATENCY_MASK (0x7f<<24) @@ -4793,13 +4848,13 @@ enum skl_disp_power_wells { #define WM1_LP_SR_MASK (0x7ff<<8) #define WM1_LP_SR_SHIFT 8 #define WM1_LP_CURSOR_MASK (0xff) -#define WM2_LP_ILK 0x4510c +#define WM2_LP_ILK _MMIO(0x4510c) #define WM2_LP_EN (1<<31) -#define WM3_LP_ILK 0x45110 +#define WM3_LP_ILK _MMIO(0x45110) #define WM3_LP_EN (1<<31) -#define WM1S_LP_ILK 0x45120 -#define WM2S_LP_IVB 0x45124 -#define WM3S_LP_IVB 0x45128 +#define WM1S_LP_ILK _MMIO(0x45120) +#define WM2S_LP_IVB _MMIO(0x45124) +#define WM3S_LP_IVB _MMIO(0x45128) #define WM1S_LP_EN (1<<31) #define HSW_WM_LP_VAL(lat, fbc, pri, cur) \ @@ -4807,7 +4862,7 @@ enum skl_disp_power_wells { ((fbc) << WM1_LP_FBC_SHIFT) | ((pri) << WM1_LP_SR_SHIFT) | (cur)) /* Memory latency timer register */ -#define MLTR_ILK 0x11222 +#define MLTR_ILK _MMIO(0x11222) #define MLTR_WM1_SHIFT 0 #define MLTR_WM2_SHIFT 8 /* the unit of memory self-refresh latency time is 0.5us */ @@ -4815,7 +4870,7 @@ enum skl_disp_power_wells { /* the address where we get all kinds of latency value */ -#define SSKPD 0x5d10 +#define SSKPD _MMIO(0x5d10) #define SSKPD_WM_MASK 0x3f #define SSKPD_WM0_SHIFT 0 #define SSKPD_WM1_SHIFT 8 @@ -4848,8 +4903,8 @@ enum skl_disp_power_wells { /* GM45+ just has to be different */ #define _PIPEA_FRMCOUNT_G4X 0x70040 #define _PIPEA_FLIPCOUNT_G4X 0x70044 -#define PIPE_FRMCOUNT_G4X(pipe) _PIPE2(pipe, _PIPEA_FRMCOUNT_G4X) -#define PIPE_FLIPCOUNT_G4X(pipe) _PIPE2(pipe, _PIPEA_FLIPCOUNT_G4X) +#define PIPE_FRMCOUNT_G4X(pipe) _MMIO_PIPE2(pipe, _PIPEA_FRMCOUNT_G4X) +#define PIPE_FLIPCOUNT_G4X(pipe) _MMIO_PIPE2(pipe, _PIPEA_FLIPCOUNT_G4X) /* Cursor A & B regs */ #define _CURACNTR 0x70080 @@ -4887,7 +4942,7 @@ enum skl_disp_power_wells { #define CURSOR_POS_SIGN 0x8000 #define 
CURSOR_X_SHIFT 0 #define CURSOR_Y_SHIFT 16 -#define CURSIZE 0x700a0 +#define CURSIZE _MMIO(0x700a0) #define _CURBCNTR 0x700c0 #define _CURBBASE 0x700c4 #define _CURBPOS 0x700c8 @@ -4896,7 +4951,7 @@ enum skl_disp_power_wells { #define _CURBBASE_IVB 0x71084 #define _CURBPOS_IVB 0x71088 -#define _CURSOR2(pipe, reg) (dev_priv->info.cursor_offsets[(pipe)] - \ +#define _CURSOR2(pipe, reg) _MMIO(dev_priv->info.cursor_offsets[(pipe)] - \ dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \ dev_priv->info.display_mmio_offset) @@ -4957,16 +5012,16 @@ enum skl_disp_power_wells { #define _DSPAOFFSET 0x701A4 /* HSW */ #define _DSPASURFLIVE 0x701AC -#define DSPCNTR(plane) _PIPE2(plane, _DSPACNTR) -#define DSPADDR(plane) _PIPE2(plane, _DSPAADDR) -#define DSPSTRIDE(plane) _PIPE2(plane, _DSPASTRIDE) -#define DSPPOS(plane) _PIPE2(plane, _DSPAPOS) -#define DSPSIZE(plane) _PIPE2(plane, _DSPASIZE) -#define DSPSURF(plane) _PIPE2(plane, _DSPASURF) -#define DSPTILEOFF(plane) _PIPE2(plane, _DSPATILEOFF) -#define DSPLINOFF(plane) DSPADDR(plane) -#define DSPOFFSET(plane) _PIPE2(plane, _DSPAOFFSET) -#define DSPSURFLIVE(plane) _PIPE2(plane, _DSPASURFLIVE) +#define DSPCNTR(plane) _MMIO_PIPE2(plane, _DSPACNTR) +#define DSPADDR(plane) _MMIO_PIPE2(plane, _DSPAADDR) +#define DSPSTRIDE(plane) _MMIO_PIPE2(plane, _DSPASTRIDE) +#define DSPPOS(plane) _MMIO_PIPE2(plane, _DSPAPOS) +#define DSPSIZE(plane) _MMIO_PIPE2(plane, _DSPASIZE) +#define DSPSURF(plane) _MMIO_PIPE2(plane, _DSPASURF) +#define DSPTILEOFF(plane) _MMIO_PIPE2(plane, _DSPATILEOFF) +#define DSPLINOFF(plane) DSPADDR(plane) +#define DSPOFFSET(plane) _MMIO_PIPE2(plane, _DSPAOFFSET) +#define DSPSURFLIVE(plane) _MMIO_PIPE2(plane, _DSPASURFLIVE) /* CHV pipe B blender and primary plane */ #define _CHV_BLEND_A 0x60a00 @@ -4980,11 +5035,11 @@ enum skl_disp_power_wells { #define _PRIMCNSTALPHA_A 0x60a10 #define PRIM_CONST_ALPHA_ENABLE (1<<31) -#define CHV_BLEND(pipe) _TRANSCODER2(pipe, _CHV_BLEND_A) -#define CHV_CANVAS(pipe) _TRANSCODER2(pipe, _CHV_CANVAS_A) -#define PRIMPOS(plane) _TRANSCODER2(plane, _PRIMPOS_A) -#define PRIMSIZE(plane) _TRANSCODER2(plane, _PRIMSIZE_A) -#define PRIMCNSTALPHA(plane) _TRANSCODER2(plane, _PRIMCNSTALPHA_A) +#define CHV_BLEND(pipe) _MMIO_TRANS2(pipe, _CHV_BLEND_A) +#define CHV_CANVAS(pipe) _MMIO_TRANS2(pipe, _CHV_CANVAS_A) +#define PRIMPOS(plane) _MMIO_TRANS2(plane, _PRIMPOS_A) +#define PRIMSIZE(plane) _MMIO_TRANS2(plane, _PRIMSIZE_A) +#define PRIMCNSTALPHA(plane) _MMIO_TRANS2(plane, _PRIMCNSTALPHA_A) /* Display/Sprite base address macros */ #define DISP_BASEADDR_MASK (0xfffff000) @@ -5002,9 +5057,10 @@ enum skl_disp_power_wells { * [10:1f] all * [30:32] all */ -#define SWF0(i) (dev_priv->info.display_mmio_offset + 0x70410 + (i) * 4) -#define SWF1(i) (dev_priv->info.display_mmio_offset + 0x71410 + (i) * 4) -#define SWF3(i) (dev_priv->info.display_mmio_offset + 0x72414 + (i) * 4) +#define SWF0(i) _MMIO(dev_priv->info.display_mmio_offset + 0x70410 + (i) * 4) +#define SWF1(i) _MMIO(dev_priv->info.display_mmio_offset + 0x71410 + (i) * 4) +#define SWF3(i) _MMIO(dev_priv->info.display_mmio_offset + 0x72414 + (i) * 4) +#define SWF_ILK(i) _MMIO(0x4F000 + (i) * 4) /* Pipe B */ #define _PIPEBDSL (dev_priv->info.display_mmio_offset + 0x71000) @@ -5086,18 +5142,18 @@ enum skl_disp_power_wells { #define _DVSBSCALE 0x73204 #define _DVSBGAMC 0x73300 -#define DVSCNTR(pipe) _PIPE(pipe, _DVSACNTR, _DVSBCNTR) -#define DVSLINOFF(pipe) _PIPE(pipe, _DVSALINOFF, _DVSBLINOFF) -#define DVSSTRIDE(pipe) _PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE) -#define DVSPOS(pipe) _PIPE(pipe, 
_DVSAPOS, _DVSBPOS) -#define DVSSURF(pipe) _PIPE(pipe, _DVSASURF, _DVSBSURF) -#define DVSKEYMAX(pipe) _PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL) -#define DVSSIZE(pipe) _PIPE(pipe, _DVSASIZE, _DVSBSIZE) -#define DVSSCALE(pipe) _PIPE(pipe, _DVSASCALE, _DVSBSCALE) -#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF) -#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL) -#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK) -#define DVSSURFLIVE(pipe) _PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE) +#define DVSCNTR(pipe) _MMIO_PIPE(pipe, _DVSACNTR, _DVSBCNTR) +#define DVSLINOFF(pipe) _MMIO_PIPE(pipe, _DVSALINOFF, _DVSBLINOFF) +#define DVSSTRIDE(pipe) _MMIO_PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE) +#define DVSPOS(pipe) _MMIO_PIPE(pipe, _DVSAPOS, _DVSBPOS) +#define DVSSURF(pipe) _MMIO_PIPE(pipe, _DVSASURF, _DVSBSURF) +#define DVSKEYMAX(pipe) _MMIO_PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL) +#define DVSSIZE(pipe) _MMIO_PIPE(pipe, _DVSASIZE, _DVSBSIZE) +#define DVSSCALE(pipe) _MMIO_PIPE(pipe, _DVSASCALE, _DVSBSCALE) +#define DVSTILEOFF(pipe) _MMIO_PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF) +#define DVSKEYVAL(pipe) _MMIO_PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL) +#define DVSKEYMSK(pipe) _MMIO_PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK) +#define DVSSURFLIVE(pipe) _MMIO_PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE) #define _SPRA_CTL 0x70280 #define SPRITE_ENABLE (1<<31) @@ -5160,20 +5216,20 @@ enum skl_disp_power_wells { #define _SPRB_SCALE 0x71304 #define _SPRB_GAMC 0x71400 -#define SPRCTL(pipe) _PIPE(pipe, _SPRA_CTL, _SPRB_CTL) -#define SPRLINOFF(pipe) _PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF) -#define SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE) -#define SPRPOS(pipe) _PIPE(pipe, _SPRA_POS, _SPRB_POS) -#define SPRSIZE(pipe) _PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE) -#define SPRKEYVAL(pipe) _PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL) -#define SPRKEYMSK(pipe) _PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK) -#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF) -#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX) -#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF) -#define SPROFFSET(pipe) _PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET) -#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE) -#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) -#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE) +#define SPRCTL(pipe) _MMIO_PIPE(pipe, _SPRA_CTL, _SPRB_CTL) +#define SPRLINOFF(pipe) _MMIO_PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF) +#define SPRSTRIDE(pipe) _MMIO_PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE) +#define SPRPOS(pipe) _MMIO_PIPE(pipe, _SPRA_POS, _SPRB_POS) +#define SPRSIZE(pipe) _MMIO_PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE) +#define SPRKEYVAL(pipe) _MMIO_PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL) +#define SPRKEYMSK(pipe) _MMIO_PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK) +#define SPRSURF(pipe) _MMIO_PIPE(pipe, _SPRA_SURF, _SPRB_SURF) +#define SPRKEYMAX(pipe) _MMIO_PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX) +#define SPRTILEOFF(pipe) _MMIO_PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF) +#define SPROFFSET(pipe) _MMIO_PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET) +#define SPRSCALE(pipe) _MMIO_PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE) +#define SPRGAMC(pipe) _MMIO_PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) +#define SPRSURFLIVE(pipe) _MMIO_PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE) #define _SPACNTR (VLV_DISPLAY_BASE + 0x72180) #define SP_ENABLE (1<<31) @@ -5223,18 +5279,18 @@ enum skl_disp_power_wells { #define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8) #define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4) -#define 
SPCNTR(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPACNTR, _SPBCNTR) -#define SPLINOFF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPALINOFF, _SPBLINOFF) -#define SPSTRIDE(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASTRIDE, _SPBSTRIDE) -#define SPPOS(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAPOS, _SPBPOS) -#define SPSIZE(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASIZE, _SPBSIZE) -#define SPKEYMINVAL(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMINVAL, _SPBKEYMINVAL) -#define SPKEYMSK(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMSK, _SPBKEYMSK) -#define SPSURF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASURF, _SPBSURF) -#define SPKEYMAXVAL(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMAXVAL, _SPBKEYMAXVAL) -#define SPTILEOFF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPATILEOFF, _SPBTILEOFF) -#define SPCONSTALPHA(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPACONSTALPHA, _SPBCONSTALPHA) -#define SPGAMC(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAGAMC, _SPBGAMC) +#define SPCNTR(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPACNTR, _SPBCNTR) +#define SPLINOFF(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPALINOFF, _SPBLINOFF) +#define SPSTRIDE(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPASTRIDE, _SPBSTRIDE) +#define SPPOS(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAPOS, _SPBPOS) +#define SPSIZE(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPASIZE, _SPBSIZE) +#define SPKEYMINVAL(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAKEYMINVAL, _SPBKEYMINVAL) +#define SPKEYMSK(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAKEYMSK, _SPBKEYMSK) +#define SPSURF(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPASURF, _SPBSURF) +#define SPKEYMAXVAL(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAKEYMAXVAL, _SPBKEYMAXVAL) +#define SPTILEOFF(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPATILEOFF, _SPBTILEOFF) +#define SPCONSTALPHA(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPACONSTALPHA, _SPBCONSTALPHA) +#define SPGAMC(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAGAMC, _SPBGAMC) /* * CHV pipe B sprite CSC @@ -5243,29 +5299,29 @@ enum skl_disp_power_wells { * |yg| = |c3 c4 c5| x |yg + yg_ioff| + |yg_ooff| * |cb| |c6 c7 c8| |cb + cr_ioff| |cb_ooff| */ -#define SPCSCYGOFF(sprite) (VLV_DISPLAY_BASE + 0x6d900 + (sprite) * 0x1000) -#define SPCSCCBOFF(sprite) (VLV_DISPLAY_BASE + 0x6d904 + (sprite) * 0x1000) -#define SPCSCCROFF(sprite) (VLV_DISPLAY_BASE + 0x6d908 + (sprite) * 0x1000) +#define SPCSCYGOFF(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d900 + (sprite) * 0x1000) +#define SPCSCCBOFF(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d904 + (sprite) * 0x1000) +#define SPCSCCROFF(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d908 + (sprite) * 0x1000) #define SPCSC_OOFF(x) (((x) & 0x7ff) << 16) /* s11 */ #define SPCSC_IOFF(x) (((x) & 0x7ff) << 0) /* s11 */ -#define SPCSCC01(sprite) (VLV_DISPLAY_BASE + 0x6d90c + (sprite) * 0x1000) -#define SPCSCC23(sprite) (VLV_DISPLAY_BASE + 0x6d910 + (sprite) * 0x1000) -#define SPCSCC45(sprite) (VLV_DISPLAY_BASE + 0x6d914 + (sprite) * 0x1000) -#define SPCSCC67(sprite) (VLV_DISPLAY_BASE + 0x6d918 + (sprite) * 0x1000) -#define SPCSCC8(sprite) (VLV_DISPLAY_BASE + 0x6d91c + (sprite) * 0x1000) +#define SPCSCC01(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d90c + (sprite) * 0x1000) +#define SPCSCC23(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d910 + (sprite) * 0x1000) +#define SPCSCC45(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d914 + (sprite) * 0x1000) +#define SPCSCC67(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d918 + (sprite) * 0x1000) +#define SPCSCC8(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d91c + 
(sprite) * 0x1000) #define SPCSC_C1(x) (((x) & 0x7fff) << 16) /* s3.12 */ #define SPCSC_C0(x) (((x) & 0x7fff) << 0) /* s3.12 */ -#define SPCSCYGICLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d920 + (sprite) * 0x1000) -#define SPCSCCBICLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d924 + (sprite) * 0x1000) -#define SPCSCCRICLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d928 + (sprite) * 0x1000) +#define SPCSCYGICLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d920 + (sprite) * 0x1000) +#define SPCSCCBICLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d924 + (sprite) * 0x1000) +#define SPCSCCRICLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d928 + (sprite) * 0x1000) #define SPCSC_IMAX(x) (((x) & 0x7ff) << 16) /* s11 */ #define SPCSC_IMIN(x) (((x) & 0x7ff) << 0) /* s11 */ -#define SPCSCYGOCLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d92c + (sprite) * 0x1000) -#define SPCSCCBOCLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d930 + (sprite) * 0x1000) -#define SPCSCCROCLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d934 + (sprite) * 0x1000) +#define SPCSCYGOCLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d92c + (sprite) * 0x1000) +#define SPCSCCBOCLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d930 + (sprite) * 0x1000) +#define SPCSCCROCLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d934 + (sprite) * 0x1000) #define SPCSC_OMAX(x) ((x) << 16) /* u10 */ #define SPCSC_OMIN(x) ((x) << 0) /* u10 */ @@ -5346,7 +5402,7 @@ enum skl_disp_power_wells { #define _PLANE_CTL_2(pipe) _PIPE(pipe, _PLANE_CTL_2_A, _PLANE_CTL_2_B) #define _PLANE_CTL_3(pipe) _PIPE(pipe, _PLANE_CTL_3_A, _PLANE_CTL_3_B) #define PLANE_CTL(pipe, plane) \ - _PLANE(plane, _PLANE_CTL_1(pipe), _PLANE_CTL_2(pipe)) + _MMIO_PLANE(plane, _PLANE_CTL_1(pipe), _PLANE_CTL_2(pipe)) #define _PLANE_STRIDE_1_B 0x71188 #define _PLANE_STRIDE_2_B 0x71288 @@ -5358,7 +5414,7 @@ enum skl_disp_power_wells { #define _PLANE_STRIDE_3(pipe) \ _PIPE(pipe, _PLANE_STRIDE_3_A, _PLANE_STRIDE_3_B) #define PLANE_STRIDE(pipe, plane) \ - _PLANE(plane, _PLANE_STRIDE_1(pipe), _PLANE_STRIDE_2(pipe)) + _MMIO_PLANE(plane, _PLANE_STRIDE_1(pipe), _PLANE_STRIDE_2(pipe)) #define _PLANE_POS_1_B 0x7118c #define _PLANE_POS_2_B 0x7128c @@ -5367,7 +5423,7 @@ enum skl_disp_power_wells { #define _PLANE_POS_2(pipe) _PIPE(pipe, _PLANE_POS_2_A, _PLANE_POS_2_B) #define _PLANE_POS_3(pipe) _PIPE(pipe, _PLANE_POS_3_A, _PLANE_POS_3_B) #define PLANE_POS(pipe, plane) \ - _PLANE(plane, _PLANE_POS_1(pipe), _PLANE_POS_2(pipe)) + _MMIO_PLANE(plane, _PLANE_POS_1(pipe), _PLANE_POS_2(pipe)) #define _PLANE_SIZE_1_B 0x71190 #define _PLANE_SIZE_2_B 0x71290 @@ -5376,7 +5432,7 @@ enum skl_disp_power_wells { #define _PLANE_SIZE_2(pipe) _PIPE(pipe, _PLANE_SIZE_2_A, _PLANE_SIZE_2_B) #define _PLANE_SIZE_3(pipe) _PIPE(pipe, _PLANE_SIZE_3_A, _PLANE_SIZE_3_B) #define PLANE_SIZE(pipe, plane) \ - _PLANE(plane, _PLANE_SIZE_1(pipe), _PLANE_SIZE_2(pipe)) + _MMIO_PLANE(plane, _PLANE_SIZE_1(pipe), _PLANE_SIZE_2(pipe)) #define _PLANE_SURF_1_B 0x7119c #define _PLANE_SURF_2_B 0x7129c @@ -5385,35 +5441,35 @@ enum skl_disp_power_wells { #define _PLANE_SURF_2(pipe) _PIPE(pipe, _PLANE_SURF_2_A, _PLANE_SURF_2_B) #define _PLANE_SURF_3(pipe) _PIPE(pipe, _PLANE_SURF_3_A, _PLANE_SURF_3_B) #define PLANE_SURF(pipe, plane) \ - _PLANE(plane, _PLANE_SURF_1(pipe), _PLANE_SURF_2(pipe)) + _MMIO_PLANE(plane, _PLANE_SURF_1(pipe), _PLANE_SURF_2(pipe)) #define _PLANE_OFFSET_1_B 0x711a4 #define _PLANE_OFFSET_2_B 0x712a4 #define _PLANE_OFFSET_1(pipe) _PIPE(pipe, _PLANE_OFFSET_1_A, _PLANE_OFFSET_1_B) #define _PLANE_OFFSET_2(pipe) _PIPE(pipe, _PLANE_OFFSET_2_A, _PLANE_OFFSET_2_B) #define PLANE_OFFSET(pipe, plane) \ - _PLANE(plane, 
_PLANE_OFFSET_1(pipe), _PLANE_OFFSET_2(pipe)) + _MMIO_PLANE(plane, _PLANE_OFFSET_1(pipe), _PLANE_OFFSET_2(pipe)) #define _PLANE_KEYVAL_1_B 0x71194 #define _PLANE_KEYVAL_2_B 0x71294 #define _PLANE_KEYVAL_1(pipe) _PIPE(pipe, _PLANE_KEYVAL_1_A, _PLANE_KEYVAL_1_B) #define _PLANE_KEYVAL_2(pipe) _PIPE(pipe, _PLANE_KEYVAL_2_A, _PLANE_KEYVAL_2_B) #define PLANE_KEYVAL(pipe, plane) \ - _PLANE(plane, _PLANE_KEYVAL_1(pipe), _PLANE_KEYVAL_2(pipe)) + _MMIO_PLANE(plane, _PLANE_KEYVAL_1(pipe), _PLANE_KEYVAL_2(pipe)) #define _PLANE_KEYMSK_1_B 0x71198 #define _PLANE_KEYMSK_2_B 0x71298 #define _PLANE_KEYMSK_1(pipe) _PIPE(pipe, _PLANE_KEYMSK_1_A, _PLANE_KEYMSK_1_B) #define _PLANE_KEYMSK_2(pipe) _PIPE(pipe, _PLANE_KEYMSK_2_A, _PLANE_KEYMSK_2_B) #define PLANE_KEYMSK(pipe, plane) \ - _PLANE(plane, _PLANE_KEYMSK_1(pipe), _PLANE_KEYMSK_2(pipe)) + _MMIO_PLANE(plane, _PLANE_KEYMSK_1(pipe), _PLANE_KEYMSK_2(pipe)) #define _PLANE_KEYMAX_1_B 0x711a0 #define _PLANE_KEYMAX_2_B 0x712a0 #define _PLANE_KEYMAX_1(pipe) _PIPE(pipe, _PLANE_KEYMAX_1_A, _PLANE_KEYMAX_1_B) #define _PLANE_KEYMAX_2(pipe) _PIPE(pipe, _PLANE_KEYMAX_2_A, _PLANE_KEYMAX_2_B) #define PLANE_KEYMAX(pipe, plane) \ - _PLANE(plane, _PLANE_KEYMAX_1(pipe), _PLANE_KEYMAX_2(pipe)) + _MMIO_PLANE(plane, _PLANE_KEYMAX_1(pipe), _PLANE_KEYMAX_2(pipe)) #define _PLANE_BUF_CFG_1_B 0x7127c #define _PLANE_BUF_CFG_2_B 0x7137c @@ -5422,7 +5478,7 @@ enum skl_disp_power_wells { #define _PLANE_BUF_CFG_2(pipe) \ _PIPE(pipe, _PLANE_BUF_CFG_2_A, _PLANE_BUF_CFG_2_B) #define PLANE_BUF_CFG(pipe, plane) \ - _PLANE(plane, _PLANE_BUF_CFG_1(pipe), _PLANE_BUF_CFG_2(pipe)) + _MMIO_PLANE(plane, _PLANE_BUF_CFG_1(pipe), _PLANE_BUF_CFG_2(pipe)) #define _PLANE_NV12_BUF_CFG_1_B 0x71278 #define _PLANE_NV12_BUF_CFG_2_B 0x71378 @@ -5431,26 +5487,26 @@ enum skl_disp_power_wells { #define _PLANE_NV12_BUF_CFG_2(pipe) \ _PIPE(pipe, _PLANE_NV12_BUF_CFG_2_A, _PLANE_NV12_BUF_CFG_2_B) #define PLANE_NV12_BUF_CFG(pipe, plane) \ - _PLANE(plane, _PLANE_NV12_BUF_CFG_1(pipe), _PLANE_NV12_BUF_CFG_2(pipe)) + _MMIO_PLANE(plane, _PLANE_NV12_BUF_CFG_1(pipe), _PLANE_NV12_BUF_CFG_2(pipe)) /* SKL new cursor registers */ #define _CUR_BUF_CFG_A 0x7017c #define _CUR_BUF_CFG_B 0x7117c -#define CUR_BUF_CFG(pipe) _PIPE(pipe, _CUR_BUF_CFG_A, _CUR_BUF_CFG_B) +#define CUR_BUF_CFG(pipe) _MMIO_PIPE(pipe, _CUR_BUF_CFG_A, _CUR_BUF_CFG_B) /* VBIOS regs */ -#define VGACNTRL 0x71400 +#define VGACNTRL _MMIO(0x71400) # define VGA_DISP_DISABLE (1 << 31) # define VGA_2X_MODE (1 << 30) # define VGA_PIPE_B_SELECT (1 << 29) -#define VLV_VGACNTRL (VLV_DISPLAY_BASE + 0x71400) +#define VLV_VGACNTRL _MMIO(VLV_DISPLAY_BASE + 0x71400) /* Ironlake */ -#define CPU_VGACNTRL 0x41000 +#define CPU_VGACNTRL _MMIO(0x41000) -#define DIGITAL_PORT_HOTPLUG_CNTRL 0x44030 +#define DIGITAL_PORT_HOTPLUG_CNTRL _MMIO(0x44030) #define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4) #define DIGITAL_PORTA_PULSE_DURATION_2ms (0 << 2) /* pre-HSW */ #define DIGITAL_PORTA_PULSE_DURATION_4_5ms (1 << 2) /* pre-HSW */ @@ -5463,26 +5519,26 @@ enum skl_disp_power_wells { #define DIGITAL_PORTA_HOTPLUG_LONG_DETECT (2 << 0) /* refresh rate hardware control */ -#define RR_HW_CTL 0x45300 +#define RR_HW_CTL _MMIO(0x45300) #define RR_HW_LOW_POWER_FRAMES_MASK 0xff #define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00 -#define FDI_PLL_BIOS_0 0x46000 +#define FDI_PLL_BIOS_0 _MMIO(0x46000) #define FDI_PLL_FB_CLOCK_MASK 0xff -#define FDI_PLL_BIOS_1 0x46004 -#define FDI_PLL_BIOS_2 0x46008 -#define DISPLAY_PORT_PLL_BIOS_0 0x4600c -#define DISPLAY_PORT_PLL_BIOS_1 0x46010 -#define DISPLAY_PORT_PLL_BIOS_2 0x46014 
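/*
 * [editor's note] The per-pipe and per-plane accessors converted above
 * (DVSCNTR, SPRCTL, PLANE_CTL, PLANE_WM, ...) interpolate between the
 * unit-A and unit-B offsets and then wrap the result. A sketch of the
 * helpers, assuming the _PIPE/_PLANE definitions that appear earlier in
 * i915_reg.h:
 */
#define _PIPE(pipe, a, b)		((a) + (pipe) * ((b) - (a)))
#define _PLANE(plane, a, b)		_PIPE(plane, a, b)
#define _MMIO_PIPE(pipe, a, b)		_MMIO(_PIPE(pipe, a, b))
#define _MMIO_PLANE(plane, a, b)	_MMIO(_PLANE(plane, a, b))
/*
 * Usage: with _CUR_WM_A_0 at 0x70140 and _CUR_WM_B_0 at 0x71140,
 * CUR_WM(PIPE_B, 2) evaluates to _MMIO(0x71148) - the offset arithmetic
 * stays inside the macro and the call site only ever sees an i915_reg_t.
 */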
+#define FDI_PLL_BIOS_1 _MMIO(0x46004) +#define FDI_PLL_BIOS_2 _MMIO(0x46008) +#define DISPLAY_PORT_PLL_BIOS_0 _MMIO(0x4600c) +#define DISPLAY_PORT_PLL_BIOS_1 _MMIO(0x46010) +#define DISPLAY_PORT_PLL_BIOS_2 _MMIO(0x46014) -#define PCH_3DCGDIS0 0x46020 +#define PCH_3DCGDIS0 _MMIO(0x46020) # define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18) # define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1) -#define PCH_3DCGDIS1 0x46024 +#define PCH_3DCGDIS1 _MMIO(0x46024) # define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11) -#define FDI_PLL_FREQ_CTL 0x46030 +#define FDI_PLL_FREQ_CTL _MMIO(0x46030) #define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24) #define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00 #define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff @@ -5519,14 +5575,14 @@ enum skl_disp_power_wells { #define _PIPEB_LINK_M2 0x61048 #define _PIPEB_LINK_N2 0x6104c -#define PIPE_DATA_M1(tran) _TRANSCODER2(tran, _PIPEA_DATA_M1) -#define PIPE_DATA_N1(tran) _TRANSCODER2(tran, _PIPEA_DATA_N1) -#define PIPE_DATA_M2(tran) _TRANSCODER2(tran, _PIPEA_DATA_M2) -#define PIPE_DATA_N2(tran) _TRANSCODER2(tran, _PIPEA_DATA_N2) -#define PIPE_LINK_M1(tran) _TRANSCODER2(tran, _PIPEA_LINK_M1) -#define PIPE_LINK_N1(tran) _TRANSCODER2(tran, _PIPEA_LINK_N1) -#define PIPE_LINK_M2(tran) _TRANSCODER2(tran, _PIPEA_LINK_M2) -#define PIPE_LINK_N2(tran) _TRANSCODER2(tran, _PIPEA_LINK_N2) +#define PIPE_DATA_M1(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_M1) +#define PIPE_DATA_N1(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_N1) +#define PIPE_DATA_M2(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_M2) +#define PIPE_DATA_N2(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_N2) +#define PIPE_LINK_M1(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_M1) +#define PIPE_LINK_N1(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_N1) +#define PIPE_LINK_M2(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_M2) +#define PIPE_LINK_N2(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_N2) /* CPU panel fitter */ /* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */ @@ -5549,11 +5605,11 @@ enum skl_disp_power_wells { #define _PFA_HSCALE 0x68090 #define _PFB_HSCALE 0x68890 -#define PF_CTL(pipe) _PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1) -#define PF_WIN_SZ(pipe) _PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ) -#define PF_WIN_POS(pipe) _PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS) -#define PF_VSCALE(pipe) _PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE) -#define PF_HSCALE(pipe) _PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE) +#define PF_CTL(pipe) _MMIO_PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1) +#define PF_WIN_SZ(pipe) _MMIO_PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ) +#define PF_WIN_POS(pipe) _MMIO_PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS) +#define PF_VSCALE(pipe) _MMIO_PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE) +#define PF_HSCALE(pipe) _MMIO_PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE) #define _PSA_CTL 0x68180 #define _PSB_CTL 0x68980 @@ -5563,9 +5619,9 @@ enum skl_disp_power_wells { #define _PSA_WIN_POS 0x68170 #define _PSB_WIN_POS 0x68970 -#define PS_CTL(pipe) _PIPE(pipe, _PSA_CTL, _PSB_CTL) -#define PS_WIN_SZ(pipe) _PIPE(pipe, _PSA_WIN_SZ, _PSB_WIN_SZ) -#define PS_WIN_POS(pipe) _PIPE(pipe, _PSA_WIN_POS, _PSB_WIN_POS) +#define PS_CTL(pipe) _MMIO_PIPE(pipe, _PSA_CTL, _PSB_CTL) +#define PS_WIN_SZ(pipe) _MMIO_PIPE(pipe, _PSA_WIN_SZ, _PSB_WIN_SZ) +#define PS_WIN_POS(pipe) _MMIO_PIPE(pipe, _PSA_WIN_POS, _PSB_WIN_POS) /* * Skylake scalers @@ -5654,48 +5710,63 @@ enum skl_disp_power_wells { #define _PS_ECC_STAT_1C 0x691D0 #define _ID(id, a, b) ((a) + (id)*((b)-(a))) -#define SKL_PS_CTRL(pipe, id) _PIPE(pipe, \ +#define SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe, \ _ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \ _ID(id, _PS_1B_CTRL, _PS_2B_CTRL)) -#define 
SKL_PS_PWR_GATE(pipe, id) _PIPE(pipe, \ +#define SKL_PS_PWR_GATE(pipe, id) _MMIO_PIPE(pipe, \ _ID(id, _PS_PWR_GATE_1A, _PS_PWR_GATE_2A), \ _ID(id, _PS_PWR_GATE_1B, _PS_PWR_GATE_2B)) -#define SKL_PS_WIN_POS(pipe, id) _PIPE(pipe, \ +#define SKL_PS_WIN_POS(pipe, id) _MMIO_PIPE(pipe, \ _ID(id, _PS_WIN_POS_1A, _PS_WIN_POS_2A), \ _ID(id, _PS_WIN_POS_1B, _PS_WIN_POS_2B)) -#define SKL_PS_WIN_SZ(pipe, id) _PIPE(pipe, \ +#define SKL_PS_WIN_SZ(pipe, id) _MMIO_PIPE(pipe, \ _ID(id, _PS_WIN_SZ_1A, _PS_WIN_SZ_2A), \ _ID(id, _PS_WIN_SZ_1B, _PS_WIN_SZ_2B)) -#define SKL_PS_VSCALE(pipe, id) _PIPE(pipe, \ +#define SKL_PS_VSCALE(pipe, id) _MMIO_PIPE(pipe, \ _ID(id, _PS_VSCALE_1A, _PS_VSCALE_2A), \ _ID(id, _PS_VSCALE_1B, _PS_VSCALE_2B)) -#define SKL_PS_HSCALE(pipe, id) _PIPE(pipe, \ +#define SKL_PS_HSCALE(pipe, id) _MMIO_PIPE(pipe, \ _ID(id, _PS_HSCALE_1A, _PS_HSCALE_2A), \ _ID(id, _PS_HSCALE_1B, _PS_HSCALE_2B)) -#define SKL_PS_VPHASE(pipe, id) _PIPE(pipe, \ +#define SKL_PS_VPHASE(pipe, id) _MMIO_PIPE(pipe, \ _ID(id, _PS_VPHASE_1A, _PS_VPHASE_2A), \ _ID(id, _PS_VPHASE_1B, _PS_VPHASE_2B)) -#define SKL_PS_HPHASE(pipe, id) _PIPE(pipe, \ +#define SKL_PS_HPHASE(pipe, id) _MMIO_PIPE(pipe, \ _ID(id, _PS_HPHASE_1A, _PS_HPHASE_2A), \ _ID(id, _PS_HPHASE_1B, _PS_HPHASE_2B)) -#define SKL_PS_ECC_STAT(pipe, id) _PIPE(pipe, \ +#define SKL_PS_ECC_STAT(pipe, id) _MMIO_PIPE(pipe, \ _ID(id, _PS_ECC_STAT_1A, _PS_ECC_STAT_2A), \ - _ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B) + _ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B)) /* legacy palette */ #define _LGC_PALETTE_A 0x4a000 #define _LGC_PALETTE_B 0x4a800 -#define LGC_PALETTE(pipe, i) (_PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) + (i) * 4) +#define LGC_PALETTE(pipe, i) _MMIO(_PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) + (i) * 4) #define _GAMMA_MODE_A 0x4a480 #define _GAMMA_MODE_B 0x4ac80 -#define GAMMA_MODE(pipe) _PIPE(pipe, _GAMMA_MODE_A, _GAMMA_MODE_B) +#define GAMMA_MODE(pipe) _MMIO_PIPE(pipe, _GAMMA_MODE_A, _GAMMA_MODE_B) #define GAMMA_MODE_MODE_MASK (3 << 0) #define GAMMA_MODE_MODE_8BIT (0 << 0) #define GAMMA_MODE_MODE_10BIT (1 << 0) #define GAMMA_MODE_MODE_12BIT (2 << 0) #define GAMMA_MODE_MODE_SPLIT (3 << 0) +/* DMC/CSR */ +#define CSR_PROGRAM(i) _MMIO(0x80000 + (i) * 4) +#define CSR_SSP_BASE_ADDR_GEN9 0x00002FC0 +#define CSR_HTP_ADDR_SKL 0x00500034 +#define CSR_SSP_BASE _MMIO(0x8F074) +#define CSR_HTP_SKL _MMIO(0x8F004) +#define CSR_LAST_WRITE _MMIO(0x8F034) +#define CSR_LAST_WRITE_VALUE 0xc003b400 +/* MMIO address range for CSR program (0x80000 - 0x82FFF) */ +#define CSR_MMIO_START_RANGE 0x80000 +#define CSR_MMIO_END_RANGE 0x8FFFF +#define SKL_CSR_DC3_DC5_COUNT _MMIO(0x80030) +#define SKL_CSR_DC5_DC6_COUNT _MMIO(0x8002C) +#define BXT_CSR_DC3_DC5_COUNT _MMIO(0x80038) + /* interrupts */ #define DE_MASTER_IRQ_CONTROL (1 << 31) #define DE_SPRITEB_FLIP_DONE (1 << 29) @@ -5747,20 +5818,20 @@ enum skl_disp_power_wells { #define DE_PIPEA_VBLANK_IVB (1<<0) #define DE_PIPE_VBLANK_IVB(pipe) (1 << ((pipe) * 5)) -#define VLV_MASTER_IER 0x4400c /* Gunit master IER */ +#define VLV_MASTER_IER _MMIO(0x4400c) /* Gunit master IER */ #define MASTER_INTERRUPT_ENABLE (1<<31) -#define DEISR 0x44000 -#define DEIMR 0x44004 -#define DEIIR 0x44008 -#define DEIER 0x4400c +#define DEISR _MMIO(0x44000) +#define DEIMR _MMIO(0x44004) +#define DEIIR _MMIO(0x44008) +#define DEIER _MMIO(0x4400c) -#define GTISR 0x44010 -#define GTIMR 0x44014 -#define GTIIR 0x44018 -#define GTIER 0x4401c +#define GTISR _MMIO(0x44010) +#define GTIMR _MMIO(0x44014) +#define GTIIR _MMIO(0x44018) +#define GTIER _MMIO(0x4401c) -#define 
GEN8_MASTER_IRQ 0x44200 +#define GEN8_MASTER_IRQ _MMIO(0x44200) #define GEN8_MASTER_IRQ_CONTROL (1<<31) #define GEN8_PCU_IRQ (1<<30) #define GEN8_DE_PCH_IRQ (1<<23) @@ -5777,10 +5848,10 @@ enum skl_disp_power_wells { #define GEN8_GT_BCS_IRQ (1<<1) #define GEN8_GT_RCS_IRQ (1<<0) -#define GEN8_GT_ISR(which) (0x44300 + (0x10 * (which))) -#define GEN8_GT_IMR(which) (0x44304 + (0x10 * (which))) -#define GEN8_GT_IIR(which) (0x44308 + (0x10 * (which))) -#define GEN8_GT_IER(which) (0x4430c + (0x10 * (which))) +#define GEN8_GT_ISR(which) _MMIO(0x44300 + (0x10 * (which))) +#define GEN8_GT_IMR(which) _MMIO(0x44304 + (0x10 * (which))) +#define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which))) +#define GEN8_GT_IER(which) _MMIO(0x4430c + (0x10 * (which))) #define GEN8_RCS_IRQ_SHIFT 0 #define GEN8_BCS_IRQ_SHIFT 16 @@ -5789,10 +5860,10 @@ enum skl_disp_power_wells { #define GEN8_VECS_IRQ_SHIFT 0 #define GEN8_WD_IRQ_SHIFT 16 -#define GEN8_DE_PIPE_ISR(pipe) (0x44400 + (0x10 * (pipe))) -#define GEN8_DE_PIPE_IMR(pipe) (0x44404 + (0x10 * (pipe))) -#define GEN8_DE_PIPE_IIR(pipe) (0x44408 + (0x10 * (pipe))) -#define GEN8_DE_PIPE_IER(pipe) (0x4440c + (0x10 * (pipe))) +#define GEN8_DE_PIPE_ISR(pipe) _MMIO(0x44400 + (0x10 * (pipe))) +#define GEN8_DE_PIPE_IMR(pipe) _MMIO(0x44404 + (0x10 * (pipe))) +#define GEN8_DE_PIPE_IIR(pipe) _MMIO(0x44408 + (0x10 * (pipe))) +#define GEN8_DE_PIPE_IER(pipe) _MMIO(0x4440c + (0x10 * (pipe))) #define GEN8_PIPE_FIFO_UNDERRUN (1 << 31) #define GEN8_PIPE_CDCLK_CRC_ERROR (1 << 29) #define GEN8_PIPE_CDCLK_CRC_DONE (1 << 28) @@ -5825,10 +5896,10 @@ enum skl_disp_power_wells { GEN9_PIPE_PLANE2_FAULT | \ GEN9_PIPE_PLANE1_FAULT) -#define GEN8_DE_PORT_ISR 0x44440 -#define GEN8_DE_PORT_IMR 0x44444 -#define GEN8_DE_PORT_IIR 0x44448 -#define GEN8_DE_PORT_IER 0x4444c +#define GEN8_DE_PORT_ISR _MMIO(0x44440) +#define GEN8_DE_PORT_IMR _MMIO(0x44444) +#define GEN8_DE_PORT_IIR _MMIO(0x44448) +#define GEN8_DE_PORT_IER _MMIO(0x4444c) #define GEN9_AUX_CHANNEL_D (1 << 27) #define GEN9_AUX_CHANNEL_C (1 << 26) #define GEN9_AUX_CHANNEL_B (1 << 25) @@ -5842,23 +5913,23 @@ enum skl_disp_power_wells { #define BXT_DE_PORT_GMBUS (1 << 1) #define GEN8_AUX_CHANNEL_A (1 << 0) -#define GEN8_DE_MISC_ISR 0x44460 -#define GEN8_DE_MISC_IMR 0x44464 -#define GEN8_DE_MISC_IIR 0x44468 -#define GEN8_DE_MISC_IER 0x4446c +#define GEN8_DE_MISC_ISR _MMIO(0x44460) +#define GEN8_DE_MISC_IMR _MMIO(0x44464) +#define GEN8_DE_MISC_IIR _MMIO(0x44468) +#define GEN8_DE_MISC_IER _MMIO(0x4446c) #define GEN8_DE_MISC_GSE (1 << 27) -#define GEN8_PCU_ISR 0x444e0 -#define GEN8_PCU_IMR 0x444e4 -#define GEN8_PCU_IIR 0x444e8 -#define GEN8_PCU_IER 0x444ec +#define GEN8_PCU_ISR _MMIO(0x444e0) +#define GEN8_PCU_IMR _MMIO(0x444e4) +#define GEN8_PCU_IIR _MMIO(0x444e8) +#define GEN8_PCU_IER _MMIO(0x444ec) -#define ILK_DISPLAY_CHICKEN2 0x42004 +#define ILK_DISPLAY_CHICKEN2 _MMIO(0x42004) /* Required on all Ironlake and Sandybridge according to the B-Spec. 
*/ #define ILK_ELPIN_409_SELECT (1 << 25) #define ILK_DPARB_GATE (1<<22) #define ILK_VSDPFD_FULL (1<<21) -#define FUSE_STRAP 0x42014 +#define FUSE_STRAP _MMIO(0x42014) #define ILK_INTERNAL_GRAPHICS_DISABLE (1 << 31) #define ILK_INTERNAL_DISPLAY_DISABLE (1 << 30) #define ILK_DISPLAY_DEBUG_DISABLE (1 << 29) @@ -5867,18 +5938,18 @@ enum skl_disp_power_wells { #define HSW_CDCLK_LIMIT (1 << 24) #define ILK_DESKTOP (1 << 23) -#define ILK_DSPCLK_GATE_D 0x42020 +#define ILK_DSPCLK_GATE_D _MMIO(0x42020) #define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) #define ILK_DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9) #define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8) #define ILK_DPFDUNIT_CLOCK_GATE_ENABLE (1 << 7) #define ILK_DPARBUNIT_CLOCK_GATE_ENABLE (1 << 5) -#define IVB_CHICKEN3 0x4200c +#define IVB_CHICKEN3 _MMIO(0x4200c) # define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5) # define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2) -#define CHICKEN_PAR1_1 0x42080 +#define CHICKEN_PAR1_1 _MMIO(0x42080) #define DPA_MASK_VBLANK_SRD (1 << 15) #define FORCE_ARB_IDLE_PLANES (1 << 14) @@ -5886,70 +5957,70 @@ enum skl_disp_power_wells { #define _CHICKEN_PIPESL_1_B 0x420b4 #define HSW_FBCQ_DIS (1 << 22) #define BDW_DPRS_MASK_VBLANK_SRD (1 << 0) -#define CHICKEN_PIPESL_1(pipe) _PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B) +#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B) -#define DISP_ARB_CTL 0x45000 +#define DISP_ARB_CTL _MMIO(0x45000) #define DISP_TILE_SURFACE_SWIZZLING (1<<13) #define DISP_FBC_WM_DIS (1<<15) -#define DISP_ARB_CTL2 0x45004 +#define DISP_ARB_CTL2 _MMIO(0x45004) #define DISP_DATA_PARTITION_5_6 (1<<6) -#define DBUF_CTL 0x45008 +#define DBUF_CTL _MMIO(0x45008) #define DBUF_POWER_REQUEST (1<<31) #define DBUF_POWER_STATE (1<<30) -#define GEN7_MSG_CTL 0x45010 +#define GEN7_MSG_CTL _MMIO(0x45010) #define WAIT_FOR_PCH_RESET_ACK (1<<1) #define WAIT_FOR_PCH_FLR_ACK (1<<0) -#define HSW_NDE_RSTWRN_OPT 0x46408 +#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408) #define RESET_PCH_HANDSHAKE_ENABLE (1<<4) -#define SKL_DFSM 0x51000 +#define SKL_DFSM _MMIO(0x51000) #define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23) #define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23) #define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23) #define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23) #define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23) -#define FF_SLICE_CS_CHICKEN2 0x20e4 +#define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4) #define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8) /* GEN7 chicken */ -#define GEN7_COMMON_SLICE_CHICKEN1 0x7010 +#define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010) # define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26)) # define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14) -#define COMMON_SLICE_CHICKEN2 0x7014 +#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014) # define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0) -#define HIZ_CHICKEN 0x7018 +#define HIZ_CHICKEN _MMIO(0x7018) # define CHV_HZ_8X8_MODE_IN_1X (1<<15) # define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE (1<<3) -#define GEN9_SLICE_COMMON_ECO_CHICKEN0 0x7308 +#define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308) #define DISABLE_PIXEL_MASK_CAMMING (1<<14) -#define GEN7_L3SQCREG1 0xB010 +#define GEN7_L3SQCREG1 _MMIO(0xB010) #define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 -#define GEN8_L3SQCREG1 0xB100 +#define GEN8_L3SQCREG1 _MMIO(0xB100) #define BDW_WA_L3SQCREG1_DEFAULT 0x784000 -#define GEN7_L3CNTLREG1 0xB01C +#define GEN7_L3CNTLREG1 _MMIO(0xB01C) #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C #define GEN7_L3AGDIS (1<<19) -#define GEN7_L3CNTLREG2 0xB020 -#define GEN7_L3CNTLREG3 0xB024 +#define 
GEN7_L3CNTLREG2 _MMIO(0xB020) +#define GEN7_L3CNTLREG3 _MMIO(0xB024) -#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030 +#define GEN7_L3_CHICKEN_MODE_REGISTER _MMIO(0xB030) #define GEN7_WA_L3_CHICKEN_MODE 0x20000000 -#define GEN7_L3SQCREG4 0xb034 +#define GEN7_L3SQCREG4 _MMIO(0xb034) #define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27) -#define GEN8_L3SQCREG4 0xb118 +#define GEN8_L3SQCREG4 _MMIO(0xb118) #define GEN8_LQSC_RO_PERF_DIS (1<<27) #define GEN8_LQSC_FLUSH_COHERENT_LINES (1<<21) /* GEN8 chicken */ -#define HDC_CHICKEN0 0x7300 +#define HDC_CHICKEN0 _MMIO(0x7300) #define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1<<15) #define HDC_FENCE_DEST_SLM_DISABLE (1<<14) #define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11) @@ -5958,17 +6029,17 @@ enum skl_disp_power_wells { #define HDC_BARRIER_PERFORMANCE_DISABLE (1<<10) /* GEN9 chicken */ -#define SLICE_ECO_CHICKEN0 0x7308 +#define SLICE_ECO_CHICKEN0 _MMIO(0x7308) #define PIXEL_MASK_CAMMING_DISABLE (1 << 14) /* WaCatErrorRejectionIssue */ -#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030 +#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG _MMIO(0x9030) #define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) -#define HSW_SCRATCH1 0xb038 +#define HSW_SCRATCH1 _MMIO(0xb038) #define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27) -#define BDW_SCRATCH1 0xb11c +#define BDW_SCRATCH1 _MMIO(0xb11c) #define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1<<2) /* PCH */ @@ -6062,12 +6133,12 @@ enum skl_disp_power_wells { SDE_FDI_RXB_CPT | \ SDE_FDI_RXA_CPT) -#define SDEISR 0xc4000 -#define SDEIMR 0xc4004 -#define SDEIIR 0xc4008 -#define SDEIER 0xc400c +#define SDEISR _MMIO(0xc4000) +#define SDEIMR _MMIO(0xc4004) +#define SDEIIR _MMIO(0xc4008) +#define SDEIER _MMIO(0xc400c) -#define SERR_INT 0xc4040 +#define SERR_INT _MMIO(0xc4040) #define SERR_INT_POISON (1<<31) #define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6) #define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3) @@ -6075,7 +6146,7 @@ enum skl_disp_power_wells { #define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<((pipe)*3)) /* digital port hotplug */ -#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */ +#define PCH_PORT_HOTPLUG _MMIO(0xc4030) /* SHOTPLUG_CTL */ #define PORTA_HOTPLUG_ENABLE (1 << 28) /* LPT:LP+ & BXT */ #define PORTA_HOTPLUG_STATUS_MASK (3 << 24) /* SPT+ & BXT */ #define PORTA_HOTPLUG_NO_DETECT (0 << 24) /* SPT+ & BXT */ @@ -6112,42 +6183,42 @@ enum skl_disp_power_wells { #define PORTB_HOTPLUG_SHORT_DETECT (1 << 0) #define PORTB_HOTPLUG_LONG_DETECT (2 << 0) -#define PCH_PORT_HOTPLUG2 0xc403C /* SHOTPLUG_CTL2 SPT+ */ +#define PCH_PORT_HOTPLUG2 _MMIO(0xc403C) /* SHOTPLUG_CTL2 SPT+ */ #define PORTE_HOTPLUG_ENABLE (1 << 4) #define PORTE_HOTPLUG_STATUS_MASK (3 << 0) #define PORTE_HOTPLUG_NO_DETECT (0 << 0) #define PORTE_HOTPLUG_SHORT_DETECT (1 << 0) #define PORTE_HOTPLUG_LONG_DETECT (2 << 0) -#define PCH_GPIOA 0xc5010 -#define PCH_GPIOB 0xc5014 -#define PCH_GPIOC 0xc5018 -#define PCH_GPIOD 0xc501c -#define PCH_GPIOE 0xc5020 -#define PCH_GPIOF 0xc5024 +#define PCH_GPIOA _MMIO(0xc5010) +#define PCH_GPIOB _MMIO(0xc5014) +#define PCH_GPIOC _MMIO(0xc5018) +#define PCH_GPIOD _MMIO(0xc501c) +#define PCH_GPIOE _MMIO(0xc5020) +#define PCH_GPIOF _MMIO(0xc5024) -#define PCH_GMBUS0 0xc5100 -#define PCH_GMBUS1 0xc5104 -#define PCH_GMBUS2 0xc5108 -#define PCH_GMBUS3 0xc510c -#define PCH_GMBUS4 0xc5110 -#define PCH_GMBUS5 0xc5120 +#define PCH_GMBUS0 _MMIO(0xc5100) +#define PCH_GMBUS1 _MMIO(0xc5104) +#define PCH_GMBUS2 _MMIO(0xc5108) +#define PCH_GMBUS3 _MMIO(0xc510c) +#define PCH_GMBUS4 _MMIO(0xc5110) +#define PCH_GMBUS5 _MMIO(0xc5120) #define _PCH_DPLL_A 0xc6014 
#define _PCH_DPLL_B 0xc6018 -#define PCH_DPLL(pll) (pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B) +#define PCH_DPLL(pll) _MMIO(pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B) #define _PCH_FPA0 0xc6040 #define FP_CB_TUNE (0x3<<22) #define _PCH_FPA1 0xc6044 #define _PCH_FPB0 0xc6048 #define _PCH_FPB1 0xc604c -#define PCH_FP0(pll) (pll == 0 ? _PCH_FPA0 : _PCH_FPB0) -#define PCH_FP1(pll) (pll == 0 ? _PCH_FPA1 : _PCH_FPB1) +#define PCH_FP0(pll) _MMIO(pll == 0 ? _PCH_FPA0 : _PCH_FPB0) +#define PCH_FP1(pll) _MMIO(pll == 0 ? _PCH_FPA1 : _PCH_FPB1) -#define PCH_DPLL_TEST 0xc606c +#define PCH_DPLL_TEST _MMIO(0xc606c) -#define PCH_DREF_CONTROL 0xC6200 +#define PCH_DREF_CONTROL _MMIO(0xC6200) #define DREF_CONTROL_MASK 0x7fc3 #define DREF_CPU_SOURCE_OUTPUT_DISABLE (0<<13) #define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2<<13) @@ -6170,19 +6241,19 @@ enum skl_disp_power_wells { #define DREF_SSC4_DISABLE (0) #define DREF_SSC4_ENABLE (1) -#define PCH_RAWCLK_FREQ 0xc6204 +#define PCH_RAWCLK_FREQ _MMIO(0xc6204) #define FDL_TP1_TIMER_SHIFT 12 #define FDL_TP1_TIMER_MASK (3<<12) #define FDL_TP2_TIMER_SHIFT 10 #define FDL_TP2_TIMER_MASK (3<<10) #define RAWCLK_FREQ_MASK 0x3ff -#define PCH_DPLL_TMR_CFG 0xc6208 +#define PCH_DPLL_TMR_CFG _MMIO(0xc6208) -#define PCH_SSC4_PARMS 0xc6210 -#define PCH_SSC4_AUX_PARMS 0xc6214 +#define PCH_SSC4_PARMS _MMIO(0xc6210) +#define PCH_SSC4_AUX_PARMS _MMIO(0xc6214) -#define PCH_DPLL_SEL 0xc7000 +#define PCH_DPLL_SEL _MMIO(0xc7000) #define TRANS_DPLLB_SEL(pipe) (1 << ((pipe) * 4)) #define TRANS_DPLLA_SEL(pipe) 0 #define TRANS_DPLL_ENABLE(pipe) (1 << ((pipe) * 4 + 3)) @@ -6230,79 +6301,73 @@ enum skl_disp_power_wells { #define _VIDEO_DIP_DATA_B 0xe1208 #define _VIDEO_DIP_GCP_B 0xe1210 -#define TVIDEO_DIP_CTL(pipe) _PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B) -#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) -#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) +#define TVIDEO_DIP_CTL(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B) +#define TVIDEO_DIP_DATA(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) +#define TVIDEO_DIP_GCP(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) /* Per-transcoder DIP controls (VLV) */ -#define VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200) -#define VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208) -#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210) +#define _VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200) +#define _VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208) +#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210) -#define VLV_VIDEO_DIP_CTL_B (VLV_DISPLAY_BASE + 0x61170) -#define VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174) -#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178) +#define _VLV_VIDEO_DIP_CTL_B (VLV_DISPLAY_BASE + 0x61170) +#define _VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174) +#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178) -#define CHV_VIDEO_DIP_CTL_C (VLV_DISPLAY_BASE + 0x611f0) -#define CHV_VIDEO_DIP_DATA_C (VLV_DISPLAY_BASE + 0x611f4) -#define CHV_VIDEO_DIP_GDCP_PAYLOAD_C (VLV_DISPLAY_BASE + 0x611f8) +#define _CHV_VIDEO_DIP_CTL_C (VLV_DISPLAY_BASE + 0x611f0) +#define _CHV_VIDEO_DIP_DATA_C (VLV_DISPLAY_BASE + 0x611f4) +#define _CHV_VIDEO_DIP_GDCP_PAYLOAD_C (VLV_DISPLAY_BASE + 0x611f8) #define VLV_TVIDEO_DIP_CTL(pipe) \ - _PIPE3((pipe), VLV_VIDEO_DIP_CTL_A, \ - VLV_VIDEO_DIP_CTL_B, CHV_VIDEO_DIP_CTL_C) + _MMIO_PIPE3((pipe), _VLV_VIDEO_DIP_CTL_A, \ + _VLV_VIDEO_DIP_CTL_B, _CHV_VIDEO_DIP_CTL_C) 
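/*
 * [editor's note] The renames above (VLV_VIDEO_DIP_CTL_A ->
 * _VLV_VIDEO_DIP_CTL_A and friends) follow the file's convention that a
 * leading underscore marks a raw numeric offset used only as a building
 * block; the public name is the _MMIO_*-wrapped accessor, so any stray
 * use of the raw offset where an i915_reg_t is expected fails to compile.
 * The three-instance VLV/CHV case needs a three-way selector; a sketch,
 * assuming the _PIPE3 helper defined earlier in the file:
 */
#define _PIPE3(pipe, a, b, c)	((pipe) == PIPE_A ? (a) : \
				 (pipe) == PIPE_B ? (b) : (c))
#define _MMIO_PIPE3(pipe, a, b, c)	_MMIO(_PIPE3(pipe, a, b, c))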
#define VLV_TVIDEO_DIP_DATA(pipe) \ - _PIPE3((pipe), VLV_VIDEO_DIP_DATA_A, \ - VLV_VIDEO_DIP_DATA_B, CHV_VIDEO_DIP_DATA_C) + _MMIO_PIPE3((pipe), _VLV_VIDEO_DIP_DATA_A, \ + _VLV_VIDEO_DIP_DATA_B, _CHV_VIDEO_DIP_DATA_C) #define VLV_TVIDEO_DIP_GCP(pipe) \ - _PIPE3((pipe), VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \ - VLV_VIDEO_DIP_GDCP_PAYLOAD_B, CHV_VIDEO_DIP_GDCP_PAYLOAD_C) + _MMIO_PIPE3((pipe), _VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \ + _VLV_VIDEO_DIP_GDCP_PAYLOAD_B, _CHV_VIDEO_DIP_GDCP_PAYLOAD_C) /* Haswell DIP controls */ -#define HSW_VIDEO_DIP_CTL_A 0x60200 -#define HSW_VIDEO_DIP_AVI_DATA_A 0x60220 -#define HSW_VIDEO_DIP_VS_DATA_A 0x60260 -#define HSW_VIDEO_DIP_SPD_DATA_A 0x602A0 -#define HSW_VIDEO_DIP_GMP_DATA_A 0x602E0 -#define HSW_VIDEO_DIP_VSC_DATA_A 0x60320 -#define HSW_VIDEO_DIP_AVI_ECC_A 0x60240 -#define HSW_VIDEO_DIP_VS_ECC_A 0x60280 -#define HSW_VIDEO_DIP_SPD_ECC_A 0x602C0 -#define HSW_VIDEO_DIP_GMP_ECC_A 0x60300 -#define HSW_VIDEO_DIP_VSC_ECC_A 0x60344 -#define HSW_VIDEO_DIP_GCP_A 0x60210 - -#define HSW_VIDEO_DIP_CTL_B 0x61200 -#define HSW_VIDEO_DIP_AVI_DATA_B 0x61220 -#define HSW_VIDEO_DIP_VS_DATA_B 0x61260 -#define HSW_VIDEO_DIP_SPD_DATA_B 0x612A0 -#define HSW_VIDEO_DIP_GMP_DATA_B 0x612E0 -#define HSW_VIDEO_DIP_VSC_DATA_B 0x61320 -#define HSW_VIDEO_DIP_BVI_ECC_B 0x61240 -#define HSW_VIDEO_DIP_VS_ECC_B 0x61280 -#define HSW_VIDEO_DIP_SPD_ECC_B 0x612C0 -#define HSW_VIDEO_DIP_GMP_ECC_B 0x61300 -#define HSW_VIDEO_DIP_VSC_ECC_B 0x61344 -#define HSW_VIDEO_DIP_GCP_B 0x61210 - -#define HSW_TVIDEO_DIP_CTL(trans) \ - _TRANSCODER2(trans, HSW_VIDEO_DIP_CTL_A) -#define HSW_TVIDEO_DIP_AVI_DATA(trans, i) \ - (_TRANSCODER2(trans, HSW_VIDEO_DIP_AVI_DATA_A) + (i) * 4) -#define HSW_TVIDEO_DIP_VS_DATA(trans, i) \ - (_TRANSCODER2(trans, HSW_VIDEO_DIP_VS_DATA_A) + (i) * 4) -#define HSW_TVIDEO_DIP_SPD_DATA(trans, i) \ - (_TRANSCODER2(trans, HSW_VIDEO_DIP_SPD_DATA_A) + (i) * 4) -#define HSW_TVIDEO_DIP_GCP(trans) \ - _TRANSCODER2(trans, HSW_VIDEO_DIP_GCP_A) -#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) \ - (_TRANSCODER2(trans, HSW_VIDEO_DIP_VSC_DATA_A) + (i) * 4) - -#define HSW_STEREO_3D_CTL_A 0x70020 -#define S3D_ENABLE (1<<31) -#define HSW_STEREO_3D_CTL_B 0x71020 - -#define HSW_STEREO_3D_CTL(trans) \ - _PIPE2(trans, HSW_STEREO_3D_CTL_A) + +#define _HSW_VIDEO_DIP_CTL_A 0x60200 +#define _HSW_VIDEO_DIP_AVI_DATA_A 0x60220 +#define _HSW_VIDEO_DIP_VS_DATA_A 0x60260 +#define _HSW_VIDEO_DIP_SPD_DATA_A 0x602A0 +#define _HSW_VIDEO_DIP_GMP_DATA_A 0x602E0 +#define _HSW_VIDEO_DIP_VSC_DATA_A 0x60320 +#define _HSW_VIDEO_DIP_AVI_ECC_A 0x60240 +#define _HSW_VIDEO_DIP_VS_ECC_A 0x60280 +#define _HSW_VIDEO_DIP_SPD_ECC_A 0x602C0 +#define _HSW_VIDEO_DIP_GMP_ECC_A 0x60300 +#define _HSW_VIDEO_DIP_VSC_ECC_A 0x60344 +#define _HSW_VIDEO_DIP_GCP_A 0x60210 + +#define _HSW_VIDEO_DIP_CTL_B 0x61200 +#define _HSW_VIDEO_DIP_AVI_DATA_B 0x61220 +#define _HSW_VIDEO_DIP_VS_DATA_B 0x61260 +#define _HSW_VIDEO_DIP_SPD_DATA_B 0x612A0 +#define _HSW_VIDEO_DIP_GMP_DATA_B 0x612E0 +#define _HSW_VIDEO_DIP_VSC_DATA_B 0x61320 +#define _HSW_VIDEO_DIP_BVI_ECC_B 0x61240 +#define _HSW_VIDEO_DIP_VS_ECC_B 0x61280 +#define _HSW_VIDEO_DIP_SPD_ECC_B 0x612C0 +#define _HSW_VIDEO_DIP_GMP_ECC_B 0x61300 +#define _HSW_VIDEO_DIP_VSC_ECC_B 0x61344 +#define _HSW_VIDEO_DIP_GCP_B 0x61210 + +#define HSW_TVIDEO_DIP_CTL(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_CTL_A) +#define HSW_TVIDEO_DIP_AVI_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4) +#define HSW_TVIDEO_DIP_VS_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4) +#define 
HSW_TVIDEO_DIP_SPD_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4) +#define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A) +#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4) + +#define _HSW_STEREO_3D_CTL_A 0x70020 +#define S3D_ENABLE (1<<31) +#define _HSW_STEREO_3D_CTL_B 0x71020 + +#define HSW_STEREO_3D_CTL(trans) _MMIO_PIPE2(trans, _HSW_STEREO_3D_CTL_A) #define _PCH_TRANS_HTOTAL_B 0xe1000 #define _PCH_TRANS_HBLANK_B 0xe1004 @@ -6310,16 +6375,15 @@ enum skl_disp_power_wells { #define _PCH_TRANS_VTOTAL_B 0xe100c #define _PCH_TRANS_VBLANK_B 0xe1010 #define _PCH_TRANS_VSYNC_B 0xe1014 -#define _PCH_TRANS_VSYNCSHIFT_B 0xe1028 +#define _PCH_TRANS_VSYNCSHIFT_B 0xe1028 -#define PCH_TRANS_HTOTAL(pipe) _PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B) -#define PCH_TRANS_HBLANK(pipe) _PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B) -#define PCH_TRANS_HSYNC(pipe) _PIPE(pipe, _PCH_TRANS_HSYNC_A, _PCH_TRANS_HSYNC_B) -#define PCH_TRANS_VTOTAL(pipe) _PIPE(pipe, _PCH_TRANS_VTOTAL_A, _PCH_TRANS_VTOTAL_B) -#define PCH_TRANS_VBLANK(pipe) _PIPE(pipe, _PCH_TRANS_VBLANK_A, _PCH_TRANS_VBLANK_B) -#define PCH_TRANS_VSYNC(pipe) _PIPE(pipe, _PCH_TRANS_VSYNC_A, _PCH_TRANS_VSYNC_B) -#define PCH_TRANS_VSYNCSHIFT(pipe) _PIPE(pipe, _PCH_TRANS_VSYNCSHIFT_A, \ - _PCH_TRANS_VSYNCSHIFT_B) +#define PCH_TRANS_HTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B) +#define PCH_TRANS_HBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B) +#define PCH_TRANS_HSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HSYNC_A, _PCH_TRANS_HSYNC_B) +#define PCH_TRANS_VTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VTOTAL_A, _PCH_TRANS_VTOTAL_B) +#define PCH_TRANS_VBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VBLANK_A, _PCH_TRANS_VBLANK_B) +#define PCH_TRANS_VSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNC_A, _PCH_TRANS_VSYNC_B) +#define PCH_TRANS_VSYNCSHIFT(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNCSHIFT_A, _PCH_TRANS_VSYNCSHIFT_B) #define _PCH_TRANSB_DATA_M1 0xe1030 #define _PCH_TRANSB_DATA_N1 0xe1034 @@ -6330,19 +6394,19 @@ enum skl_disp_power_wells { #define _PCH_TRANSB_LINK_M2 0xe1048 #define _PCH_TRANSB_LINK_N2 0xe104c -#define PCH_TRANS_DATA_M1(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_M1, _PCH_TRANSB_DATA_M1) -#define PCH_TRANS_DATA_N1(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_N1, _PCH_TRANSB_DATA_N1) -#define PCH_TRANS_DATA_M2(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_M2, _PCH_TRANSB_DATA_M2) -#define PCH_TRANS_DATA_N2(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_N2, _PCH_TRANSB_DATA_N2) -#define PCH_TRANS_LINK_M1(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_M1, _PCH_TRANSB_LINK_M1) -#define PCH_TRANS_LINK_N1(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_N1, _PCH_TRANSB_LINK_N1) -#define PCH_TRANS_LINK_M2(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_M2, _PCH_TRANSB_LINK_M2) -#define PCH_TRANS_LINK_N2(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_N2, _PCH_TRANSB_LINK_N2) +#define PCH_TRANS_DATA_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M1, _PCH_TRANSB_DATA_M1) +#define PCH_TRANS_DATA_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N1, _PCH_TRANSB_DATA_N1) +#define PCH_TRANS_DATA_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M2, _PCH_TRANSB_DATA_M2) +#define PCH_TRANS_DATA_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N2, _PCH_TRANSB_DATA_N2) +#define PCH_TRANS_LINK_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M1, _PCH_TRANSB_LINK_M1) +#define PCH_TRANS_LINK_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N1, _PCH_TRANSB_LINK_N1) +#define PCH_TRANS_LINK_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M2, _PCH_TRANSB_LINK_M2) 
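/*
 * [editor's note] HSW_TVIDEO_DIP_* and HSW_STEREO_3D_CTL above use the
 * "2" variants, which cover platforms where the per-transcoder (or
 * per-pipe) register blocks are not a fixed stride apart: each instance's
 * offset comes from a per-device table in the device info rather than
 * from A/B interpolation. A sketch under that assumption, modelled on
 * the pre-existing _TRANSCODER2/_PIPE2 helpers:
 */
#define _TRANSCODER2(tran, reg)	(dev_priv->info.trans_offsets[(tran)] - \
				 dev_priv->info.trans_offsets[TRANSCODER_A] + \
				 (reg) + dev_priv->info.display_mmio_offset)
#define _MMIO_TRANS2(tran, reg)	_MMIO(_TRANSCODER2(tran, reg))
#define _MMIO_PIPE2(pipe, reg)	_MMIO(_PIPE2(pipe, reg))
/*
 * Note these expand dev_priv from the surrounding scope, so they only
 * work inside functions with a dev_priv in scope - the same
 * implicit-argument trick the _CURSOR2 macro relies on in the cursor
 * hunk earlier in this diff.
 */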
+#define PCH_TRANS_LINK_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N2, _PCH_TRANSB_LINK_N2) #define _PCH_TRANSACONF 0xf0008 #define _PCH_TRANSBCONF 0xf1008 -#define PCH_TRANSCONF(pipe) _PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF) -#define LPT_TRANSCONF _PCH_TRANSACONF /* lpt has only one transcoder */ +#define PCH_TRANSCONF(pipe) _MMIO_PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF) +#define LPT_TRANSCONF PCH_TRANSCONF(PIPE_A) /* lpt has only one transcoder */ #define TRANS_DISABLE (0<<31) #define TRANS_ENABLE (1<<31) #define TRANS_STATE_MASK (1<<30) @@ -6363,47 +6427,47 @@ enum skl_disp_power_wells { #define _TRANSA_CHICKEN1 0xf0060 #define _TRANSB_CHICKEN1 0xf1060 -#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1) +#define TRANS_CHICKEN1(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1) #define TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE (1<<10) #define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4) #define _TRANSA_CHICKEN2 0xf0064 #define _TRANSB_CHICKEN2 0xf1064 -#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2) +#define TRANS_CHICKEN2(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2) #define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31) #define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1<<29) #define TRANS_CHICKEN2_FRAME_START_DELAY_MASK (3<<27) #define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER (1<<26) #define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH (1<<25) -#define SOUTH_CHICKEN1 0xc2000 +#define SOUTH_CHICKEN1 _MMIO(0xc2000) #define FDIA_PHASE_SYNC_SHIFT_OVR 19 #define FDIA_PHASE_SYNC_SHIFT_EN 18 #define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2))) #define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2))) #define FDI_BC_BIFURCATION_SELECT (1 << 12) #define SPT_PWM_GRANULARITY (1<<0) -#define SOUTH_CHICKEN2 0xc2004 +#define SOUTH_CHICKEN2 _MMIO(0xc2004) #define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13) #define FDI_MPHY_IOSFSB_RESET_CTL (1<<12) #define LPT_PWM_GRANULARITY (1<<5) #define DPLS_EDP_PPS_FIX_DIS (1<<0) -#define _FDI_RXA_CHICKEN 0xc200c -#define _FDI_RXB_CHICKEN 0xc2010 +#define _FDI_RXA_CHICKEN 0xc200c +#define _FDI_RXB_CHICKEN 0xc2010 #define FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1) #define FDI_RX_PHASE_SYNC_POINTER_EN (1<<0) -#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN) +#define FDI_RX_CHICKEN(pipe) _MMIO_PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN) -#define SOUTH_DSPCLK_GATE_D 0xc2020 +#define SOUTH_DSPCLK_GATE_D _MMIO(0xc2020) #define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30) #define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29) #define PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14) #define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12) /* CPU: FDI_TX */ -#define _FDI_TXA_CTL 0x60100 -#define _FDI_TXB_CTL 0x61100 -#define FDI_TX_CTL(pipe) _PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL) +#define _FDI_TXA_CTL 0x60100 +#define _FDI_TXB_CTL 0x61100 +#define FDI_TX_CTL(pipe) _MMIO_PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL) #define FDI_TX_DISABLE (0<<31) #define FDI_TX_ENABLE (1<<31) #define FDI_LINK_TRAIN_PATTERN_1 (0<<28) @@ -6453,7 +6517,7 @@ enum skl_disp_power_wells { /* FDI_RX, FDI_X is hard-wired to Transcoder_X */ #define _FDI_RXA_CTL 0xf000c #define _FDI_RXB_CTL 0xf100c -#define FDI_RX_CTL(pipe) _PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL) +#define FDI_RX_CTL(pipe) _MMIO_PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL) #define FDI_RX_ENABLE (1<<31) /* train, dp width same as FDI_TX */ #define FDI_FS_ERRC_ENABLE (1<<27) @@ -6489,14 +6553,14 @@ enum skl_disp_power_wells { #define 
FDI_RX_TP1_TO_TP2_48 (2<<20) #define FDI_RX_TP1_TO_TP2_64 (3<<20) #define FDI_RX_FDI_DELAY_90 (0x90<<0) -#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC) +#define FDI_RX_MISC(pipe) _MMIO_PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC) -#define _FDI_RXA_TUSIZE1 0xf0030 -#define _FDI_RXA_TUSIZE2 0xf0038 -#define _FDI_RXB_TUSIZE1 0xf1030 -#define _FDI_RXB_TUSIZE2 0xf1038 -#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1) -#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2) +#define _FDI_RXA_TUSIZE1 0xf0030 +#define _FDI_RXA_TUSIZE2 0xf0038 +#define _FDI_RXB_TUSIZE1 0xf1030 +#define _FDI_RXB_TUSIZE2 0xf1038 +#define FDI_RX_TUSIZE1(pipe) _MMIO_PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1) +#define FDI_RX_TUSIZE2(pipe) _MMIO_PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2) /* FDI_RX interrupt register format */ #define FDI_RX_INTER_LANE_ALIGN (1<<10) @@ -6511,44 +6575,41 @@ enum skl_disp_power_wells { #define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1) #define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0) -#define _FDI_RXA_IIR 0xf0014 -#define _FDI_RXA_IMR 0xf0018 -#define _FDI_RXB_IIR 0xf1014 -#define _FDI_RXB_IMR 0xf1018 -#define FDI_RX_IIR(pipe) _PIPE(pipe, _FDI_RXA_IIR, _FDI_RXB_IIR) -#define FDI_RX_IMR(pipe) _PIPE(pipe, _FDI_RXA_IMR, _FDI_RXB_IMR) +#define _FDI_RXA_IIR 0xf0014 +#define _FDI_RXA_IMR 0xf0018 +#define _FDI_RXB_IIR 0xf1014 +#define _FDI_RXB_IMR 0xf1018 +#define FDI_RX_IIR(pipe) _MMIO_PIPE(pipe, _FDI_RXA_IIR, _FDI_RXB_IIR) +#define FDI_RX_IMR(pipe) _MMIO_PIPE(pipe, _FDI_RXA_IMR, _FDI_RXB_IMR) -#define FDI_PLL_CTL_1 0xfe000 -#define FDI_PLL_CTL_2 0xfe004 +#define FDI_PLL_CTL_1 _MMIO(0xfe000) +#define FDI_PLL_CTL_2 _MMIO(0xfe004) -#define PCH_LVDS 0xe1180 +#define PCH_LVDS _MMIO(0xe1180) #define LVDS_DETECTED (1 << 1) /* vlv has 2 sets of panel control regs. 
*/ -#define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200) -#define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204) -#define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208) +#define _PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200) +#define _PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204) +#define _PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208) #define PANEL_PORT_SELECT_VLV(port) ((port) << 30) -#define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c) -#define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210) - -#define PIPEB_PP_STATUS (VLV_DISPLAY_BASE + 0x61300) -#define PIPEB_PP_CONTROL (VLV_DISPLAY_BASE + 0x61304) -#define PIPEB_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61308) -#define PIPEB_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6130c) -#define PIPEB_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61310) - -#define VLV_PIPE_PP_STATUS(pipe) _PIPE(pipe, PIPEA_PP_STATUS, PIPEB_PP_STATUS) -#define VLV_PIPE_PP_CONTROL(pipe) _PIPE(pipe, PIPEA_PP_CONTROL, PIPEB_PP_CONTROL) -#define VLV_PIPE_PP_ON_DELAYS(pipe) \ - _PIPE(pipe, PIPEA_PP_ON_DELAYS, PIPEB_PP_ON_DELAYS) -#define VLV_PIPE_PP_OFF_DELAYS(pipe) \ - _PIPE(pipe, PIPEA_PP_OFF_DELAYS, PIPEB_PP_OFF_DELAYS) -#define VLV_PIPE_PP_DIVISOR(pipe) \ - _PIPE(pipe, PIPEA_PP_DIVISOR, PIPEB_PP_DIVISOR) - -#define PCH_PP_STATUS 0xc7200 -#define PCH_PP_CONTROL 0xc7204 +#define _PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c) +#define _PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210) + +#define _PIPEB_PP_STATUS (VLV_DISPLAY_BASE + 0x61300) +#define _PIPEB_PP_CONTROL (VLV_DISPLAY_BASE + 0x61304) +#define _PIPEB_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61308) +#define _PIPEB_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6130c) +#define _PIPEB_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61310) + +#define VLV_PIPE_PP_STATUS(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_STATUS, _PIPEB_PP_STATUS) +#define VLV_PIPE_PP_CONTROL(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_CONTROL, _PIPEB_PP_CONTROL) +#define VLV_PIPE_PP_ON_DELAYS(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_ON_DELAYS, _PIPEB_PP_ON_DELAYS) +#define VLV_PIPE_PP_OFF_DELAYS(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_OFF_DELAYS, _PIPEB_PP_OFF_DELAYS) +#define VLV_PIPE_PP_DIVISOR(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_DIVISOR, _PIPEB_PP_DIVISOR) + +#define _PCH_PP_STATUS 0xc7200 +#define _PCH_PP_CONTROL 0xc7204 #define PANEL_UNLOCK_REGS (0xabcd << 16) #define PANEL_UNLOCK_MASK (0xffff << 16) #define BXT_POWER_CYCLE_DELAY_MASK (0x1f0) @@ -6558,7 +6619,7 @@ enum skl_disp_power_wells { #define PANEL_POWER_RESET (1 << 1) #define PANEL_POWER_OFF (0 << 0) #define PANEL_POWER_ON (1 << 0) -#define PCH_PP_ON_DELAYS 0xc7208 +#define _PCH_PP_ON_DELAYS 0xc7208 #define PANEL_PORT_SELECT_MASK (3 << 30) #define PANEL_PORT_SELECT_LVDS (0 << 30) #define PANEL_PORT_SELECT_DPA (1 << 30) @@ -6569,52 +6630,64 @@ enum skl_disp_power_wells { #define PANEL_LIGHT_ON_DELAY_MASK (0x1fff) #define PANEL_LIGHT_ON_DELAY_SHIFT 0 -#define PCH_PP_OFF_DELAYS 0xc720c +#define _PCH_PP_OFF_DELAYS 0xc720c #define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000) #define PANEL_POWER_DOWN_DELAY_SHIFT 16 #define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff) #define PANEL_LIGHT_OFF_DELAY_SHIFT 0 -#define PCH_PP_DIVISOR 0xc7210 +#define _PCH_PP_DIVISOR 0xc7210 #define PP_REFERENCE_DIVIDER_MASK (0xffffff00) #define PP_REFERENCE_DIVIDER_SHIFT 8 #define PANEL_POWER_CYCLE_DELAY_MASK (0x1f) #define PANEL_POWER_CYCLE_DELAY_SHIFT 0 +#define PCH_PP_STATUS _MMIO(_PCH_PP_STATUS) +#define PCH_PP_CONTROL _MMIO(_PCH_PP_CONTROL) +#define PCH_PP_ON_DELAYS _MMIO(_PCH_PP_ON_DELAYS) +#define PCH_PP_OFF_DELAYS _MMIO(_PCH_PP_OFF_DELAYS) +#define PCH_PP_DIVISOR _MMIO(_PCH_PP_DIVISOR) + /* BXT PPS 
changes - 2nd set of PPS registers */ #define _BXT_PP_STATUS2 0xc7300 #define _BXT_PP_CONTROL2 0xc7304 #define _BXT_PP_ON_DELAYS2 0xc7308 #define _BXT_PP_OFF_DELAYS2 0xc730c -#define BXT_PP_STATUS(n) _PIPE(n, PCH_PP_STATUS, _BXT_PP_STATUS2) -#define BXT_PP_CONTROL(n) _PIPE(n, PCH_PP_CONTROL, _BXT_PP_CONTROL2) -#define BXT_PP_ON_DELAYS(n) _PIPE(n, PCH_PP_ON_DELAYS, _BXT_PP_ON_DELAYS2) -#define BXT_PP_OFF_DELAYS(n) _PIPE(n, PCH_PP_OFF_DELAYS, _BXT_PP_OFF_DELAYS2) - -#define PCH_DP_B 0xe4100 -#define PCH_DPB_AUX_CH_CTL 0xe4110 -#define PCH_DPB_AUX_CH_DATA1 0xe4114 -#define PCH_DPB_AUX_CH_DATA2 0xe4118 -#define PCH_DPB_AUX_CH_DATA3 0xe411c -#define PCH_DPB_AUX_CH_DATA4 0xe4120 -#define PCH_DPB_AUX_CH_DATA5 0xe4124 - -#define PCH_DP_C 0xe4200 -#define PCH_DPC_AUX_CH_CTL 0xe4210 -#define PCH_DPC_AUX_CH_DATA1 0xe4214 -#define PCH_DPC_AUX_CH_DATA2 0xe4218 -#define PCH_DPC_AUX_CH_DATA3 0xe421c -#define PCH_DPC_AUX_CH_DATA4 0xe4220 -#define PCH_DPC_AUX_CH_DATA5 0xe4224 - -#define PCH_DP_D 0xe4300 -#define PCH_DPD_AUX_CH_CTL 0xe4310 -#define PCH_DPD_AUX_CH_DATA1 0xe4314 -#define PCH_DPD_AUX_CH_DATA2 0xe4318 -#define PCH_DPD_AUX_CH_DATA3 0xe431c -#define PCH_DPD_AUX_CH_DATA4 0xe4320 -#define PCH_DPD_AUX_CH_DATA5 0xe4324 +#define BXT_PP_STATUS(n) _MMIO_PIPE(n, _PCH_PP_STATUS, _BXT_PP_STATUS2) +#define BXT_PP_CONTROL(n) _MMIO_PIPE(n, _PCH_PP_CONTROL, _BXT_PP_CONTROL2) +#define BXT_PP_ON_DELAYS(n) _MMIO_PIPE(n, _PCH_PP_ON_DELAYS, _BXT_PP_ON_DELAYS2) +#define BXT_PP_OFF_DELAYS(n) _MMIO_PIPE(n, _PCH_PP_OFF_DELAYS, _BXT_PP_OFF_DELAYS2) + +#define _PCH_DP_B 0xe4100 +#define PCH_DP_B _MMIO(_PCH_DP_B) +#define _PCH_DPB_AUX_CH_CTL 0xe4110 +#define _PCH_DPB_AUX_CH_DATA1 0xe4114 +#define _PCH_DPB_AUX_CH_DATA2 0xe4118 +#define _PCH_DPB_AUX_CH_DATA3 0xe411c +#define _PCH_DPB_AUX_CH_DATA4 0xe4120 +#define _PCH_DPB_AUX_CH_DATA5 0xe4124 + +#define _PCH_DP_C 0xe4200 +#define PCH_DP_C _MMIO(_PCH_DP_C) +#define _PCH_DPC_AUX_CH_CTL 0xe4210 +#define _PCH_DPC_AUX_CH_DATA1 0xe4214 +#define _PCH_DPC_AUX_CH_DATA2 0xe4218 +#define _PCH_DPC_AUX_CH_DATA3 0xe421c +#define _PCH_DPC_AUX_CH_DATA4 0xe4220 +#define _PCH_DPC_AUX_CH_DATA5 0xe4224 + +#define _PCH_DP_D 0xe4300 +#define PCH_DP_D _MMIO(_PCH_DP_D) +#define _PCH_DPD_AUX_CH_CTL 0xe4310 +#define _PCH_DPD_AUX_CH_DATA1 0xe4314 +#define _PCH_DPD_AUX_CH_DATA2 0xe4318 +#define _PCH_DPD_AUX_CH_DATA3 0xe431c +#define _PCH_DPD_AUX_CH_DATA4 0xe4320 +#define _PCH_DPD_AUX_CH_DATA5 0xe4324 + +#define PCH_DP_AUX_CH_CTL(port) _MMIO_PORT((port) - PORT_B, _PCH_DPB_AUX_CH_CTL, _PCH_DPC_AUX_CH_CTL) +#define PCH_DP_AUX_CH_DATA(port, i) _MMIO(_PORT((port) - PORT_B, _PCH_DPB_AUX_CH_DATA1, _PCH_DPC_AUX_CH_DATA1) + (i) * 4) /* 5 registers */ /* CPT */ #define PORT_TRANS_A_SEL_CPT 0 @@ -6627,10 +6700,10 @@ enum skl_disp_power_wells { #define SDVO_PORT_TO_PIPE_CHV(val) (((val) & (3<<24)) >> 24) #define DP_PORT_TO_PIPE_CHV(val) (((val) & (3<<16)) >> 16) -#define TRANS_DP_CTL_A 0xe0300 -#define TRANS_DP_CTL_B 0xe1300 -#define TRANS_DP_CTL_C 0xe2300 -#define TRANS_DP_CTL(pipe) _PIPE(pipe, TRANS_DP_CTL_A, TRANS_DP_CTL_B) +#define _TRANS_DP_CTL_A 0xe0300 +#define _TRANS_DP_CTL_B 0xe1300 +#define _TRANS_DP_CTL_C 0xe2300 +#define TRANS_DP_CTL(pipe) _MMIO_PIPE(pipe, _TRANS_DP_CTL_A, _TRANS_DP_CTL_B) #define TRANS_DP_OUTPUT_ENABLE (1<<31) #define TRANS_DP_PORT_SEL_B (0<<29) #define TRANS_DP_PORT_SEL_C (1<<29) @@ -6683,40 +6756,40 @@ enum skl_disp_power_wells { #define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22) -#define VLV_PMWGICZ 0x1300a4 +#define VLV_PMWGICZ _MMIO(0x1300a4) -#define FORCEWAKE 0xA18C -#define 
FORCEWAKE_VLV 0x1300b0 -#define FORCEWAKE_ACK_VLV 0x1300b4 -#define FORCEWAKE_MEDIA_VLV 0x1300b8 -#define FORCEWAKE_ACK_MEDIA_VLV 0x1300bc -#define FORCEWAKE_ACK_HSW 0x130044 -#define FORCEWAKE_ACK 0x130090 -#define VLV_GTLC_WAKE_CTRL 0x130090 +#define FORCEWAKE _MMIO(0xA18C) +#define FORCEWAKE_VLV _MMIO(0x1300b0) +#define FORCEWAKE_ACK_VLV _MMIO(0x1300b4) +#define FORCEWAKE_MEDIA_VLV _MMIO(0x1300b8) +#define FORCEWAKE_ACK_MEDIA_VLV _MMIO(0x1300bc) +#define FORCEWAKE_ACK_HSW _MMIO(0x130044) +#define FORCEWAKE_ACK _MMIO(0x130090) +#define VLV_GTLC_WAKE_CTRL _MMIO(0x130090) #define VLV_GTLC_RENDER_CTX_EXISTS (1 << 25) #define VLV_GTLC_MEDIA_CTX_EXISTS (1 << 24) #define VLV_GTLC_ALLOWWAKEREQ (1 << 0) -#define VLV_GTLC_PW_STATUS 0x130094 +#define VLV_GTLC_PW_STATUS _MMIO(0x130094) #define VLV_GTLC_ALLOWWAKEACK (1 << 0) #define VLV_GTLC_ALLOWWAKEERR (1 << 1) #define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5) #define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7) -#define FORCEWAKE_MT 0xa188 /* multi-threaded */ -#define FORCEWAKE_MEDIA_GEN9 0xa270 -#define FORCEWAKE_RENDER_GEN9 0xa278 -#define FORCEWAKE_BLITTER_GEN9 0xa188 -#define FORCEWAKE_ACK_MEDIA_GEN9 0x0D88 -#define FORCEWAKE_ACK_RENDER_GEN9 0x0D84 -#define FORCEWAKE_ACK_BLITTER_GEN9 0x130044 +#define FORCEWAKE_MT _MMIO(0xa188) /* multi-threaded */ +#define FORCEWAKE_MEDIA_GEN9 _MMIO(0xa270) +#define FORCEWAKE_RENDER_GEN9 _MMIO(0xa278) +#define FORCEWAKE_BLITTER_GEN9 _MMIO(0xa188) +#define FORCEWAKE_ACK_MEDIA_GEN9 _MMIO(0x0D88) +#define FORCEWAKE_ACK_RENDER_GEN9 _MMIO(0x0D84) +#define FORCEWAKE_ACK_BLITTER_GEN9 _MMIO(0x130044) #define FORCEWAKE_KERNEL 0x1 #define FORCEWAKE_USER 0x2 -#define FORCEWAKE_MT_ACK 0x130040 -#define ECOBUS 0xa180 +#define FORCEWAKE_MT_ACK _MMIO(0x130040) +#define ECOBUS _MMIO(0xa180) #define FORCEWAKE_MT_ENABLE (1<<5) -#define VLV_SPAREG2H 0xA194 +#define VLV_SPAREG2H _MMIO(0xA194) -#define GTFIFODBG 0x120000 +#define GTFIFODBG _MMIO(0x120000) #define GT_FIFO_SBDROPERR (1<<6) #define GT_FIFO_BLOBDROPERR (1<<5) #define GT_FIFO_SB_READ_ABORTERR (1<<4) @@ -6725,23 +6798,23 @@ enum skl_disp_power_wells { #define GT_FIFO_IAWRERR (1<<1) #define GT_FIFO_IARDERR (1<<0) -#define GTFIFOCTL 0x120008 +#define GTFIFOCTL _MMIO(0x120008) #define GT_FIFO_FREE_ENTRIES_MASK 0x7f #define GT_FIFO_NUM_RESERVED_ENTRIES 20 #define GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL (1 << 12) #define GT_FIFO_CTL_RC6_POLICY_STALL (1 << 11) -#define HSW_IDICR 0x9008 +#define HSW_IDICR _MMIO(0x9008) #define IDIHASHMSK(x) (((x) & 0x3f) << 16) -#define HSW_EDRAM_PRESENT 0x120010 +#define HSW_EDRAM_PRESENT _MMIO(0x120010) #define EDRAM_ENABLED 0x1 -#define GEN6_UCGCTL1 0x9400 +#define GEN6_UCGCTL1 _MMIO(0x9400) # define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16) # define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5) # define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7) -#define GEN6_UCGCTL2 0x9404 +#define GEN6_UCGCTL2 _MMIO(0x9404) # define GEN6_VFUNIT_CLOCK_GATE_DISABLE (1 << 31) # define GEN7_VDSUNIT_CLOCK_GATE_DISABLE (1 << 30) # define GEN7_TDLUNIT_CLOCK_GATE_DISABLE (1 << 22) @@ -6749,30 +6822,30 @@ enum skl_disp_power_wells { # define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12) # define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11) -#define GEN6_UCGCTL3 0x9408 +#define GEN6_UCGCTL3 _MMIO(0x9408) -#define GEN7_UCGCTL4 0x940c +#define GEN7_UCGCTL4 _MMIO(0x940c) #define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25) -#define GEN6_RCGCTL1 0x9410 -#define GEN6_RCGCTL2 0x9414 -#define GEN6_RSTCTL 0x9420 +#define GEN6_RCGCTL1 _MMIO(0x9410) +#define GEN6_RCGCTL2 _MMIO(0x9414) +#define 
GEN6_RSTCTL _MMIO(0x9420) -#define GEN8_UCGCTL6 0x9430 +#define GEN8_UCGCTL6 _MMIO(0x9430) #define GEN8_GAPSUNIT_CLOCK_GATE_DISABLE (1<<24) #define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14) #define GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1<<28) -#define GEN6_GFXPAUSE 0xA000 -#define GEN6_RPNSWREQ 0xA008 +#define GEN6_GFXPAUSE _MMIO(0xA000) +#define GEN6_RPNSWREQ _MMIO(0xA008) #define GEN6_TURBO_DISABLE (1<<31) #define GEN6_FREQUENCY(x) ((x)<<25) #define HSW_FREQUENCY(x) ((x)<<24) #define GEN9_FREQUENCY(x) ((x)<<23) #define GEN6_OFFSET(x) ((x)<<19) #define GEN6_AGGRESSIVE_TURBO (0<<15) -#define GEN6_RC_VIDEO_FREQ 0xA00C -#define GEN6_RC_CONTROL 0xA090 +#define GEN6_RC_VIDEO_FREQ _MMIO(0xA00C) +#define GEN6_RC_CONTROL _MMIO(0xA090) #define GEN6_RC_CTL_RC6pp_ENABLE (1<<16) #define GEN6_RC_CTL_RC6p_ENABLE (1<<17) #define GEN6_RC_CTL_RC6_ENABLE (1<<18) @@ -6782,16 +6855,16 @@ enum skl_disp_power_wells { #define GEN7_RC_CTL_TO_MODE (1<<28) #define GEN6_RC_CTL_EI_MODE(x) ((x)<<27) #define GEN6_RC_CTL_HW_ENABLE (1<<31) -#define GEN6_RP_DOWN_TIMEOUT 0xA010 -#define GEN6_RP_INTERRUPT_LIMITS 0xA014 -#define GEN6_RPSTAT1 0xA01C +#define GEN6_RP_DOWN_TIMEOUT _MMIO(0xA010) +#define GEN6_RP_INTERRUPT_LIMITS _MMIO(0xA014) +#define GEN6_RPSTAT1 _MMIO(0xA01C) #define GEN6_CAGF_SHIFT 8 #define HSW_CAGF_SHIFT 7 #define GEN9_CAGF_SHIFT 23 #define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT) #define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT) #define GEN9_CAGF_MASK (0x1ff << GEN9_CAGF_SHIFT) -#define GEN6_RP_CONTROL 0xA024 +#define GEN6_RP_CONTROL _MMIO(0xA024) #define GEN6_RP_MEDIA_TURBO (1<<11) #define GEN6_RP_MEDIA_MODE_MASK (3<<9) #define GEN6_RP_MEDIA_HW_TURBO_MODE (3<<9) @@ -6805,53 +6878,53 @@ enum skl_disp_power_wells { #define GEN6_RP_UP_BUSY_CONT (0x4<<3) #define GEN6_RP_DOWN_IDLE_AVG (0x2<<0) #define GEN6_RP_DOWN_IDLE_CONT (0x1<<0) -#define GEN6_RP_UP_THRESHOLD 0xA02C -#define GEN6_RP_DOWN_THRESHOLD 0xA030 -#define GEN6_RP_CUR_UP_EI 0xA050 +#define GEN6_RP_UP_THRESHOLD _MMIO(0xA02C) +#define GEN6_RP_DOWN_THRESHOLD _MMIO(0xA030) +#define GEN6_RP_CUR_UP_EI _MMIO(0xA050) #define GEN6_CURICONT_MASK 0xffffff -#define GEN6_RP_CUR_UP 0xA054 +#define GEN6_RP_CUR_UP _MMIO(0xA054) #define GEN6_CURBSYTAVG_MASK 0xffffff -#define GEN6_RP_PREV_UP 0xA058 -#define GEN6_RP_CUR_DOWN_EI 0xA05C +#define GEN6_RP_PREV_UP _MMIO(0xA058) +#define GEN6_RP_CUR_DOWN_EI _MMIO(0xA05C) #define GEN6_CURIAVG_MASK 0xffffff -#define GEN6_RP_CUR_DOWN 0xA060 -#define GEN6_RP_PREV_DOWN 0xA064 -#define GEN6_RP_UP_EI 0xA068 -#define GEN6_RP_DOWN_EI 0xA06C -#define GEN6_RP_IDLE_HYSTERSIS 0xA070 -#define GEN6_RPDEUHWTC 0xA080 -#define GEN6_RPDEUC 0xA084 -#define GEN6_RPDEUCSW 0xA088 -#define GEN6_RC_STATE 0xA094 -#define GEN6_RC1_WAKE_RATE_LIMIT 0xA098 -#define GEN6_RC6_WAKE_RATE_LIMIT 0xA09C -#define GEN6_RC6pp_WAKE_RATE_LIMIT 0xA0A0 -#define GEN6_RC_EVALUATION_INTERVAL 0xA0A8 -#define GEN6_RC_IDLE_HYSTERSIS 0xA0AC -#define GEN6_RC_SLEEP 0xA0B0 -#define GEN6_RCUBMABDTMR 0xA0B0 -#define GEN6_RC1e_THRESHOLD 0xA0B4 -#define GEN6_RC6_THRESHOLD 0xA0B8 -#define GEN6_RC6p_THRESHOLD 0xA0BC -#define VLV_RCEDATA 0xA0BC -#define GEN6_RC6pp_THRESHOLD 0xA0C0 -#define GEN6_PMINTRMSK 0xA168 +#define GEN6_RP_CUR_DOWN _MMIO(0xA060) +#define GEN6_RP_PREV_DOWN _MMIO(0xA064) +#define GEN6_RP_UP_EI _MMIO(0xA068) +#define GEN6_RP_DOWN_EI _MMIO(0xA06C) +#define GEN6_RP_IDLE_HYSTERSIS _MMIO(0xA070) +#define GEN6_RPDEUHWTC _MMIO(0xA080) +#define GEN6_RPDEUC _MMIO(0xA084) +#define GEN6_RPDEUCSW _MMIO(0xA088) +#define GEN6_RC_STATE _MMIO(0xA094) +#define 
GEN6_RC1_WAKE_RATE_LIMIT _MMIO(0xA098) +#define GEN6_RC6_WAKE_RATE_LIMIT _MMIO(0xA09C) +#define GEN6_RC6pp_WAKE_RATE_LIMIT _MMIO(0xA0A0) +#define GEN6_RC_EVALUATION_INTERVAL _MMIO(0xA0A8) +#define GEN6_RC_IDLE_HYSTERSIS _MMIO(0xA0AC) +#define GEN6_RC_SLEEP _MMIO(0xA0B0) +#define GEN6_RCUBMABDTMR _MMIO(0xA0B0) +#define GEN6_RC1e_THRESHOLD _MMIO(0xA0B4) +#define GEN6_RC6_THRESHOLD _MMIO(0xA0B8) +#define GEN6_RC6p_THRESHOLD _MMIO(0xA0BC) +#define VLV_RCEDATA _MMIO(0xA0BC) +#define GEN6_RC6pp_THRESHOLD _MMIO(0xA0C0) +#define GEN6_PMINTRMSK _MMIO(0xA168) #define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31) -#define VLV_PWRDWNUPCTL 0xA294 -#define GEN9_MEDIA_PG_IDLE_HYSTERESIS 0xA0C4 -#define GEN9_RENDER_PG_IDLE_HYSTERESIS 0xA0C8 -#define GEN9_PG_ENABLE 0xA210 +#define VLV_PWRDWNUPCTL _MMIO(0xA294) +#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4) +#define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8) +#define GEN9_PG_ENABLE _MMIO(0xA210) #define GEN9_RENDER_PG_ENABLE (1<<0) #define GEN9_MEDIA_PG_ENABLE (1<<1) -#define VLV_CHICKEN_3 (VLV_DISPLAY_BASE + 0x7040C) +#define VLV_CHICKEN_3 _MMIO(VLV_DISPLAY_BASE + 0x7040C) #define PIXEL_OVERLAP_CNT_MASK (3 << 30) #define PIXEL_OVERLAP_CNT_SHIFT 30 -#define GEN6_PMISR 0x44020 -#define GEN6_PMIMR 0x44024 /* rps_lock */ -#define GEN6_PMIIR 0x44028 -#define GEN6_PMIER 0x4402C +#define GEN6_PMISR _MMIO(0x44020) +#define GEN6_PMIMR _MMIO(0x44024) /* rps_lock */ +#define GEN6_PMIIR _MMIO(0x44028) +#define GEN6_PMIER _MMIO(0x4402C) #define GEN6_PM_MBOX_EVENT (1<<25) #define GEN6_PM_THERMAL_EVENT (1<<24) #define GEN6_PM_RP_DOWN_TIMEOUT (1<<6) @@ -6863,30 +6936,30 @@ enum skl_disp_power_wells { GEN6_PM_RP_DOWN_THRESHOLD | \ GEN6_PM_RP_DOWN_TIMEOUT) -#define GEN7_GT_SCRATCH(i) (0x4F100 + (i) * 4) +#define GEN7_GT_SCRATCH(i) _MMIO(0x4F100 + (i) * 4) #define GEN7_GT_SCRATCH_REG_NUM 8 -#define VLV_GTLC_SURVIVABILITY_REG 0x130098 +#define VLV_GTLC_SURVIVABILITY_REG _MMIO(0x130098) #define VLV_GFX_CLK_STATUS_BIT (1<<3) #define VLV_GFX_CLK_FORCE_ON_BIT (1<<2) -#define GEN6_GT_GFX_RC6_LOCKED 0x138104 -#define VLV_COUNTER_CONTROL 0x138104 +#define GEN6_GT_GFX_RC6_LOCKED _MMIO(0x138104) +#define VLV_COUNTER_CONTROL _MMIO(0x138104) #define VLV_COUNT_RANGE_HIGH (1<<15) #define VLV_MEDIA_RC0_COUNT_EN (1<<5) #define VLV_RENDER_RC0_COUNT_EN (1<<4) #define VLV_MEDIA_RC6_COUNT_EN (1<<1) #define VLV_RENDER_RC6_COUNT_EN (1<<0) -#define GEN6_GT_GFX_RC6 0x138108 -#define VLV_GT_RENDER_RC6 0x138108 -#define VLV_GT_MEDIA_RC6 0x13810C +#define GEN6_GT_GFX_RC6 _MMIO(0x138108) +#define VLV_GT_RENDER_RC6 _MMIO(0x138108) +#define VLV_GT_MEDIA_RC6 _MMIO(0x13810C) -#define GEN6_GT_GFX_RC6p 0x13810C -#define GEN6_GT_GFX_RC6pp 0x138110 -#define VLV_RENDER_C0_COUNT 0x138118 -#define VLV_MEDIA_C0_COUNT 0x13811C +#define GEN6_GT_GFX_RC6p _MMIO(0x13810C) +#define GEN6_GT_GFX_RC6pp _MMIO(0x138110) +#define VLV_RENDER_C0_COUNT _MMIO(0x138118) +#define VLV_MEDIA_C0_COUNT _MMIO(0x13811C) -#define GEN6_PCODE_MAILBOX 0x138124 +#define GEN6_PCODE_MAILBOX _MMIO(0x138124) #define GEN6_PCODE_READY (1<<31) #define GEN6_PCODE_WRITE_RC6VIDS 0x4 #define GEN6_PCODE_READ_RC6VIDS 0x5 @@ -6909,12 +6982,12 @@ enum skl_disp_power_wells { #define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17 #define DISPLAY_IPS_CONTROL 0x19 #define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A -#define GEN6_PCODE_DATA 0x138128 +#define GEN6_PCODE_DATA _MMIO(0x138128) #define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 #define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16 -#define GEN6_PCODE_DATA1 0x13812C +#define GEN6_PCODE_DATA1 _MMIO(0x13812C) -#define 
GEN6_GT_CORE_STATUS 0x138060 +#define GEN6_GT_CORE_STATUS _MMIO(0x138060) #define GEN6_CORE_CPD_STATE_MASK (7<<4) #define GEN6_RCn_MASK 7 #define GEN6_RC0 0 @@ -6922,26 +6995,26 @@ enum skl_disp_power_wells { #define GEN6_RC6 3 #define GEN6_RC7 4 -#define GEN8_GT_SLICE_INFO 0x138064 +#define GEN8_GT_SLICE_INFO _MMIO(0x138064) #define GEN8_LSLICESTAT_MASK 0x7 -#define CHV_POWER_SS0_SIG1 0xa720 -#define CHV_POWER_SS1_SIG1 0xa728 +#define CHV_POWER_SS0_SIG1 _MMIO(0xa720) +#define CHV_POWER_SS1_SIG1 _MMIO(0xa728) #define CHV_SS_PG_ENABLE (1<<1) #define CHV_EU08_PG_ENABLE (1<<9) #define CHV_EU19_PG_ENABLE (1<<17) #define CHV_EU210_PG_ENABLE (1<<25) -#define CHV_POWER_SS0_SIG2 0xa724 -#define CHV_POWER_SS1_SIG2 0xa72c +#define CHV_POWER_SS0_SIG2 _MMIO(0xa724) +#define CHV_POWER_SS1_SIG2 _MMIO(0xa72c) #define CHV_EU311_PG_ENABLE (1<<1) -#define GEN9_SLICE_PGCTL_ACK(slice) (0x804c + (slice)*0x4) +#define GEN9_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + (slice)*0x4) #define GEN9_PGCTL_SLICE_ACK (1 << 0) #define GEN9_PGCTL_SS_ACK(subslice) (1 << (2 + (subslice)*2)) -#define GEN9_SS01_EU_PGCTL_ACK(slice) (0x805c + (slice)*0x8) -#define GEN9_SS23_EU_PGCTL_ACK(slice) (0x8060 + (slice)*0x8) +#define GEN9_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + (slice)*0x8) +#define GEN9_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + (slice)*0x8) #define GEN9_PGCTL_SSA_EU08_ACK (1 << 0) #define GEN9_PGCTL_SSA_EU19_ACK (1 << 2) #define GEN9_PGCTL_SSA_EU210_ACK (1 << 4) @@ -6951,18 +7024,17 @@ enum skl_disp_power_wells { #define GEN9_PGCTL_SSB_EU210_ACK (1 << 12) #define GEN9_PGCTL_SSB_EU311_ACK (1 << 14) -#define GEN7_MISCCPCTL (0x9424) +#define GEN7_MISCCPCTL _MMIO(0x9424) #define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0) #define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1<<2) #define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1<<4) #define GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE (1<<6) -#define GEN8_GARBCNTL 0xB004 +#define GEN8_GARBCNTL _MMIO(0xB004) #define GEN9_GAPS_TSV_CREDIT_DISABLE (1<<7) /* IVYBRIDGE DPF */ -#define GEN7_L3CDERRST1 0xB008 /* L3CD Error Status 1 */ -#define HSW_L3CDERRST11 0xB208 /* L3CD Error Status register 1 slice 1 */ +#define GEN7_L3CDERRST1(slice) _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */ #define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14) #define GEN7_PARITY_ERROR_VALID (1<<13) #define GEN7_L3CDERRST1_BANK_MASK (3<<11) @@ -6975,119 +7047,102 @@ enum skl_disp_power_wells { ((reg & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8) #define GEN7_L3CDERRST1_ENABLE (1<<7) -#define GEN7_L3LOG_BASE 0xB070 -#define HSW_L3LOG_BASE_SLICE1 0xB270 +#define GEN7_L3LOG(slice, i) _MMIO(0xB070 + (slice) * 0x200 + (i) * 4) #define GEN7_L3LOG_SIZE 0x80 -#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */ -#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100 +#define GEN7_HALF_SLICE_CHICKEN1 _MMIO(0xe100) /* IVB GT1 + VLV */ +#define GEN7_HALF_SLICE_CHICKEN1_GT2 _MMIO(0xf100) #define GEN7_MAX_PS_THREAD_DEP (8<<12) #define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10) #define GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE (1<<4) #define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3) -#define GEN9_HALF_SLICE_CHICKEN5 0xe188 +#define GEN9_HALF_SLICE_CHICKEN5 _MMIO(0xe188) #define GEN9_DG_MIRROR_FIX_ENABLE (1<<5) #define GEN9_CCS_TLB_PREFETCH_ENABLE (1<<3) -#define GEN8_ROW_CHICKEN 0xe4f0 +#define GEN8_ROW_CHICKEN _MMIO(0xe4f0) #define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8) #define STALL_DOP_GATING_DISABLE (1<<5) -#define GEN7_ROW_CHICKEN2 0xe4f4 -#define GEN7_ROW_CHICKEN2_GT2 0xf4f4 +#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4) +#define GEN7_ROW_CHICKEN2_GT2 
_MMIO(0xf4f4) #define DOP_CLOCK_GATING_DISABLE (1<<0) -#define HSW_ROW_CHICKEN3 0xe49c +#define HSW_ROW_CHICKEN3 _MMIO(0xe49c) #define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6) -#define HALF_SLICE_CHICKEN2 0xe180 +#define HALF_SLICE_CHICKEN2 _MMIO(0xe180) #define GEN8_ST_PO_DISABLE (1<<13) -#define HALF_SLICE_CHICKEN3 0xe184 +#define HALF_SLICE_CHICKEN3 _MMIO(0xe184) #define HSW_SAMPLE_C_PERFORMANCE (1<<9) #define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8) #define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1<<5) #define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1) -#define GEN9_HALF_SLICE_CHICKEN7 0xe194 +#define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194) #define GEN9_ENABLE_YV12_BUGFIX (1<<4) /* Audio */ -#define G4X_AUD_VID_DID (dev_priv->info.display_mmio_offset + 0x62020) +#define G4X_AUD_VID_DID _MMIO(dev_priv->info.display_mmio_offset + 0x62020) #define INTEL_AUDIO_DEVCL 0x808629FB #define INTEL_AUDIO_DEVBLC 0x80862801 #define INTEL_AUDIO_DEVCTG 0x80862802 -#define G4X_AUD_CNTL_ST 0x620B4 +#define G4X_AUD_CNTL_ST _MMIO(0x620B4) #define G4X_ELDV_DEVCL_DEVBLC (1 << 13) #define G4X_ELDV_DEVCTG (1 << 14) #define G4X_ELD_ADDR_MASK (0xf << 5) #define G4X_ELD_ACK (1 << 4) -#define G4X_HDMIW_HDMIEDID 0x6210C +#define G4X_HDMIW_HDMIEDID _MMIO(0x6210C) #define _IBX_HDMIW_HDMIEDID_A 0xE2050 #define _IBX_HDMIW_HDMIEDID_B 0xE2150 -#define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \ - _IBX_HDMIW_HDMIEDID_A, \ - _IBX_HDMIW_HDMIEDID_B) +#define IBX_HDMIW_HDMIEDID(pipe) _MMIO_PIPE(pipe, _IBX_HDMIW_HDMIEDID_A, \ + _IBX_HDMIW_HDMIEDID_B) #define _IBX_AUD_CNTL_ST_A 0xE20B4 #define _IBX_AUD_CNTL_ST_B 0xE21B4 -#define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \ - _IBX_AUD_CNTL_ST_A, \ - _IBX_AUD_CNTL_ST_B) +#define IBX_AUD_CNTL_ST(pipe) _MMIO_PIPE(pipe, _IBX_AUD_CNTL_ST_A, \ + _IBX_AUD_CNTL_ST_B) #define IBX_ELD_BUFFER_SIZE_MASK (0x1f << 10) #define IBX_ELD_ADDRESS_MASK (0x1f << 5) #define IBX_ELD_ACK (1 << 4) -#define IBX_AUD_CNTL_ST2 0xE20C0 +#define IBX_AUD_CNTL_ST2 _MMIO(0xE20C0) #define IBX_CP_READY(port) ((1 << 1) << (((port) - 1) * 4)) #define IBX_ELD_VALID(port) ((1 << 0) << (((port) - 1) * 4)) #define _CPT_HDMIW_HDMIEDID_A 0xE5050 #define _CPT_HDMIW_HDMIEDID_B 0xE5150 -#define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \ - _CPT_HDMIW_HDMIEDID_A, \ - _CPT_HDMIW_HDMIEDID_B) +#define CPT_HDMIW_HDMIEDID(pipe) _MMIO_PIPE(pipe, _CPT_HDMIW_HDMIEDID_A, _CPT_HDMIW_HDMIEDID_B) #define _CPT_AUD_CNTL_ST_A 0xE50B4 #define _CPT_AUD_CNTL_ST_B 0xE51B4 -#define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \ - _CPT_AUD_CNTL_ST_A, \ - _CPT_AUD_CNTL_ST_B) -#define CPT_AUD_CNTRL_ST2 0xE50C0 +#define CPT_AUD_CNTL_ST(pipe) _MMIO_PIPE(pipe, _CPT_AUD_CNTL_ST_A, _CPT_AUD_CNTL_ST_B) +#define CPT_AUD_CNTRL_ST2 _MMIO(0xE50C0) #define _VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050) #define _VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150) -#define VLV_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \ - _VLV_HDMIW_HDMIEDID_A, \ - _VLV_HDMIW_HDMIEDID_B) +#define VLV_HDMIW_HDMIEDID(pipe) _MMIO_PIPE(pipe, _VLV_HDMIW_HDMIEDID_A, _VLV_HDMIW_HDMIEDID_B) #define _VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4) #define _VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4) -#define VLV_AUD_CNTL_ST(pipe) _PIPE(pipe, \ - _VLV_AUD_CNTL_ST_A, \ - _VLV_AUD_CNTL_ST_B) -#define VLV_AUD_CNTL_ST2 (VLV_DISPLAY_BASE + 0x620C0) +#define VLV_AUD_CNTL_ST(pipe) _MMIO_PIPE(pipe, _VLV_AUD_CNTL_ST_A, _VLV_AUD_CNTL_ST_B) +#define VLV_AUD_CNTL_ST2 _MMIO(VLV_DISPLAY_BASE + 0x620C0) /* These are the 4 32-bit write offset registers for each stream * output buffer. 
It determines the offset from the * 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to. */ -#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4) +#define GEN7_SO_WRITE_OFFSET(n) _MMIO(0x5280 + (n) * 4) #define _IBX_AUD_CONFIG_A 0xe2000 #define _IBX_AUD_CONFIG_B 0xe2100 -#define IBX_AUD_CFG(pipe) _PIPE(pipe, \ - _IBX_AUD_CONFIG_A, \ - _IBX_AUD_CONFIG_B) +#define IBX_AUD_CFG(pipe) _MMIO_PIPE(pipe, _IBX_AUD_CONFIG_A, _IBX_AUD_CONFIG_B) #define _CPT_AUD_CONFIG_A 0xe5000 #define _CPT_AUD_CONFIG_B 0xe5100 -#define CPT_AUD_CFG(pipe) _PIPE(pipe, \ - _CPT_AUD_CONFIG_A, \ - _CPT_AUD_CONFIG_B) +#define CPT_AUD_CFG(pipe) _MMIO_PIPE(pipe, _CPT_AUD_CONFIG_A, _CPT_AUD_CONFIG_B) #define _VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000) #define _VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100) -#define VLV_AUD_CFG(pipe) _PIPE(pipe, \ - _VLV_AUD_CONFIG_A, \ - _VLV_AUD_CONFIG_B) +#define VLV_AUD_CFG(pipe) _MMIO_PIPE(pipe, _VLV_AUD_CONFIG_A, _VLV_AUD_CONFIG_B) #define AUD_CONFIG_N_VALUE_INDEX (1 << 29) #define AUD_CONFIG_N_PROG_ENABLE (1 << 28) @@ -7112,72 +7167,62 @@ enum skl_disp_power_wells { /* HSW Audio */ #define _HSW_AUD_CONFIG_A 0x65000 #define _HSW_AUD_CONFIG_B 0x65100 -#define HSW_AUD_CFG(pipe) _PIPE(pipe, \ - _HSW_AUD_CONFIG_A, \ - _HSW_AUD_CONFIG_B) +#define HSW_AUD_CFG(pipe) _MMIO_PIPE(pipe, _HSW_AUD_CONFIG_A, _HSW_AUD_CONFIG_B) #define _HSW_AUD_MISC_CTRL_A 0x65010 #define _HSW_AUD_MISC_CTRL_B 0x65110 -#define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \ - _HSW_AUD_MISC_CTRL_A, \ - _HSW_AUD_MISC_CTRL_B) +#define HSW_AUD_MISC_CTRL(pipe) _MMIO_PIPE(pipe, _HSW_AUD_MISC_CTRL_A, _HSW_AUD_MISC_CTRL_B) #define _HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4 #define _HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4 -#define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \ - _HSW_AUD_DIP_ELD_CTRL_ST_A, \ - _HSW_AUD_DIP_ELD_CTRL_ST_B) +#define HSW_AUD_DIP_ELD_CTRL(pipe) _MMIO_PIPE(pipe, _HSW_AUD_DIP_ELD_CTRL_ST_A, _HSW_AUD_DIP_ELD_CTRL_ST_B) /* Audio Digital Converter */ #define _HSW_AUD_DIG_CNVT_1 0x65080 #define _HSW_AUD_DIG_CNVT_2 0x65180 -#define AUD_DIG_CNVT(pipe) _PIPE(pipe, \ - _HSW_AUD_DIG_CNVT_1, \ - _HSW_AUD_DIG_CNVT_2) +#define AUD_DIG_CNVT(pipe) _MMIO_PIPE(pipe, _HSW_AUD_DIG_CNVT_1, _HSW_AUD_DIG_CNVT_2) #define DIP_PORT_SEL_MASK 0x3 #define _HSW_AUD_EDID_DATA_A 0x65050 #define _HSW_AUD_EDID_DATA_B 0x65150 -#define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \ - _HSW_AUD_EDID_DATA_A, \ - _HSW_AUD_EDID_DATA_B) +#define HSW_AUD_EDID_DATA(pipe) _MMIO_PIPE(pipe, _HSW_AUD_EDID_DATA_A, _HSW_AUD_EDID_DATA_B) -#define HSW_AUD_PIPE_CONV_CFG 0x6507c -#define HSW_AUD_PIN_ELD_CP_VLD 0x650c0 +#define HSW_AUD_PIPE_CONV_CFG _MMIO(0x6507c) +#define HSW_AUD_PIN_ELD_CP_VLD _MMIO(0x650c0) #define AUDIO_INACTIVE(trans) ((1 << 3) << ((trans) * 4)) #define AUDIO_OUTPUT_ENABLE(trans) ((1 << 2) << ((trans) * 4)) #define AUDIO_CP_READY(trans) ((1 << 1) << ((trans) * 4)) #define AUDIO_ELD_VALID(trans) ((1 << 0) << ((trans) * 4)) -#define HSW_AUD_CHICKENBIT 0x65f10 +#define HSW_AUD_CHICKENBIT _MMIO(0x65f10) #define SKL_AUD_CODEC_WAKE_SIGNAL (1 << 15) /* HSW Power Wells */ -#define HSW_PWR_WELL_BIOS 0x45400 /* CTL1 */ -#define HSW_PWR_WELL_DRIVER 0x45404 /* CTL2 */ -#define HSW_PWR_WELL_KVMR 0x45408 /* CTL3 */ -#define HSW_PWR_WELL_DEBUG 0x4540C /* CTL4 */ +#define HSW_PWR_WELL_BIOS _MMIO(0x45400) /* CTL1 */ +#define HSW_PWR_WELL_DRIVER _MMIO(0x45404) /* CTL2 */ +#define HSW_PWR_WELL_KVMR _MMIO(0x45408) /* CTL3 */ +#define HSW_PWR_WELL_DEBUG _MMIO(0x4540C) /* CTL4 */ #define HSW_PWR_WELL_ENABLE_REQUEST (1<<31) #define HSW_PWR_WELL_STATE_ENABLED (1<<30) -#define 
HSW_PWR_WELL_CTL5 0x45410 +#define HSW_PWR_WELL_CTL5 _MMIO(0x45410) #define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31) #define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20) #define HSW_PWR_WELL_FORCE_ON (1<<19) -#define HSW_PWR_WELL_CTL6 0x45414 +#define HSW_PWR_WELL_CTL6 _MMIO(0x45414) /* SKL Fuse Status */ -#define SKL_FUSE_STATUS 0x42000 +#define SKL_FUSE_STATUS _MMIO(0x42000) #define SKL_FUSE_DOWNLOAD_STATUS (1<<31) #define SKL_FUSE_PG0_DIST_STATUS (1<<27) #define SKL_FUSE_PG1_DIST_STATUS (1<<26) #define SKL_FUSE_PG2_DIST_STATUS (1<<25) /* Per-pipe DDI Function Control */ -#define TRANS_DDI_FUNC_CTL_A 0x60400 -#define TRANS_DDI_FUNC_CTL_B 0x61400 -#define TRANS_DDI_FUNC_CTL_C 0x62400 -#define TRANS_DDI_FUNC_CTL_EDP 0x6F400 -#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER2(tran, TRANS_DDI_FUNC_CTL_A) +#define _TRANS_DDI_FUNC_CTL_A 0x60400 +#define _TRANS_DDI_FUNC_CTL_B 0x61400 +#define _TRANS_DDI_FUNC_CTL_C 0x62400 +#define _TRANS_DDI_FUNC_CTL_EDP 0x6F400 +#define TRANS_DDI_FUNC_CTL(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL_A) #define TRANS_DDI_FUNC_ENABLE (1<<31) /* Those bits are ignored by pipe EDP since it can only connect to DDI A */ @@ -7207,9 +7252,9 @@ enum skl_disp_power_wells { #define TRANS_DDI_BFI_ENABLE (1<<4) /* DisplayPort Transport Control */ -#define DP_TP_CTL_A 0x64040 -#define DP_TP_CTL_B 0x64140 -#define DP_TP_CTL(port) _PORT(port, DP_TP_CTL_A, DP_TP_CTL_B) +#define _DP_TP_CTL_A 0x64040 +#define _DP_TP_CTL_B 0x64140 +#define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B) #define DP_TP_CTL_ENABLE (1<<31) #define DP_TP_CTL_MODE_SST (0<<27) #define DP_TP_CTL_MODE_MST (1<<27) @@ -7225,9 +7270,9 @@ enum skl_disp_power_wells { #define DP_TP_CTL_SCRAMBLE_DISABLE (1<<7) /* DisplayPort Transport Status */ -#define DP_TP_STATUS_A 0x64044 -#define DP_TP_STATUS_B 0x64144 -#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B) +#define _DP_TP_STATUS_A 0x64044 +#define _DP_TP_STATUS_B 0x64144 +#define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B) #define DP_TP_STATUS_IDLE_DONE (1<<25) #define DP_TP_STATUS_ACT_SENT (1<<24) #define DP_TP_STATUS_MODE_STATUS_MST (1<<23) @@ -7237,9 +7282,9 @@ enum skl_disp_power_wells { #define DP_TP_STATUS_PAYLOAD_MAPPING_VC0 (3 << 0) /* DDI Buffer Control */ -#define DDI_BUF_CTL_A 0x64000 -#define DDI_BUF_CTL_B 0x64100 -#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B) +#define _DDI_BUF_CTL_A 0x64000 +#define _DDI_BUF_CTL_B 0x64100 +#define DDI_BUF_CTL(port) _MMIO_PORT(port, _DDI_BUF_CTL_A, _DDI_BUF_CTL_B) #define DDI_BUF_CTL_ENABLE (1<<31) #define DDI_BUF_TRANS_SELECT(n) ((n) << 24) #define DDI_BUF_EMP_MASK (0xf<<24) @@ -7252,17 +7297,17 @@ enum skl_disp_power_wells { #define DDI_INIT_DISPLAY_DETECTED (1<<0) /* DDI Buffer Translations */ -#define DDI_BUF_TRANS_A 0x64E00 -#define DDI_BUF_TRANS_B 0x64E60 -#define DDI_BUF_TRANS_LO(port, i) (_PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B) + (i) * 8) -#define DDI_BUF_TRANS_HI(port, i) (_PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B) + (i) * 8 + 4) +#define _DDI_BUF_TRANS_A 0x64E00 +#define _DDI_BUF_TRANS_B 0x64E60 +#define DDI_BUF_TRANS_LO(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8) +#define DDI_BUF_TRANS_HI(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8 + 4) /* Sideband Interface (SBI) is programmed indirectly, via * SBI_ADDR, which contains the register offset; and SBI_DATA, * which contains the payload */ -#define SBI_ADDR 0xC6000 -#define SBI_DATA 0xC6004 -#define SBI_CTL_STAT 0xC6008 
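/*
 * Sketch, not from this patch: the comment above describes the indirect
 * SBI protocol -- write the target offset into SBI_ADDR, kick the
 * transaction via SBI_CTL_STAT, then fetch the payload from SBI_DATA.
 * A minimal MPHY read along those lines could look as follows.  SBI_BUSY
 * and the wait_for() helper are assumed here; they are not shown in this
 * hunk.
 */
static u32 sbi_mphy_read_sketch(struct drm_i915_private *dev_priv, u16 offset)
{
	/* wait for any in-flight transaction to complete */
	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100))
		return 0;

	/* the target offset lives in the upper 16 bits of SBI_ADDR */
	I915_WRITE(SBI_ADDR, (u32)offset << 16);
	I915_WRITE(SBI_CTL_STAT,
		   SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD | SBI_BUSY);

	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100))
		return 0;

	return I915_READ(SBI_DATA);
}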
+#define SBI_ADDR _MMIO(0xC6000) +#define SBI_DATA _MMIO(0xC6004) +#define SBI_CTL_STAT _MMIO(0xC6008) #define SBI_CTL_DEST_ICLK (0x0<<16) #define SBI_CTL_DEST_MPHY (0x1<<16) #define SBI_CTL_OP_IORD (0x2<<8) @@ -7293,12 +7338,12 @@ enum skl_disp_power_wells { #define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1<<0) /* LPT PIXCLK_GATE */ -#define PIXCLK_GATE 0xC6020 +#define PIXCLK_GATE _MMIO(0xC6020) #define PIXCLK_GATE_UNGATE (1<<0) #define PIXCLK_GATE_GATE (0<<0) /* SPLL */ -#define SPLL_CTL 0x46020 +#define SPLL_CTL _MMIO(0x46020) #define SPLL_PLL_ENABLE (1<<31) #define SPLL_PLL_SSC (1<<28) #define SPLL_PLL_NON_SSC (2<<28) @@ -7310,9 +7355,9 @@ enum skl_disp_power_wells { #define SPLL_PLL_FREQ_MASK (3<<26) /* WRPLL */ -#define WRPLL_CTL1 0x46040 -#define WRPLL_CTL2 0x46060 -#define WRPLL_CTL(pll) (pll == 0 ? WRPLL_CTL1 : WRPLL_CTL2) +#define _WRPLL_CTL1 0x46040 +#define _WRPLL_CTL2 0x46060 +#define WRPLL_CTL(pll) _MMIO_PIPE(pll, _WRPLL_CTL1, _WRPLL_CTL2) #define WRPLL_PLL_ENABLE (1<<31) #define WRPLL_PLL_SSC (1<<28) #define WRPLL_PLL_NON_SSC (2<<28) @@ -7329,9 +7374,9 @@ enum skl_disp_power_wells { #define WRPLL_DIVIDER_FB_MASK (0xff<<16) /* Port clock selection */ -#define PORT_CLK_SEL_A 0x46100 -#define PORT_CLK_SEL_B 0x46104 -#define PORT_CLK_SEL(port) _PORT(port, PORT_CLK_SEL_A, PORT_CLK_SEL_B) +#define _PORT_CLK_SEL_A 0x46100 +#define _PORT_CLK_SEL_B 0x46104 +#define PORT_CLK_SEL(port) _MMIO_PORT(port, _PORT_CLK_SEL_A, _PORT_CLK_SEL_B) #define PORT_CLK_SEL_LCPLL_2700 (0<<29) #define PORT_CLK_SEL_LCPLL_1350 (1<<29) #define PORT_CLK_SEL_LCPLL_810 (2<<29) @@ -7343,18 +7388,18 @@ enum skl_disp_power_wells { #define PORT_CLK_SEL_MASK (7<<29) /* Transcoder clock selection */ -#define TRANS_CLK_SEL_A 0x46140 -#define TRANS_CLK_SEL_B 0x46144 -#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B) +#define _TRANS_CLK_SEL_A 0x46140 +#define _TRANS_CLK_SEL_B 0x46144 +#define TRANS_CLK_SEL(tran) _MMIO_TRANS(tran, _TRANS_CLK_SEL_A, _TRANS_CLK_SEL_B) /* For each transcoder, we need to select the corresponding port clock */ #define TRANS_CLK_SEL_DISABLED (0x0<<29) #define TRANS_CLK_SEL_PORT(x) (((x)+1)<<29) -#define TRANSA_MSA_MISC 0x60410 -#define TRANSB_MSA_MISC 0x61410 -#define TRANSC_MSA_MISC 0x62410 -#define TRANS_EDP_MSA_MISC 0x6f410 -#define TRANS_MSA_MISC(tran) _TRANSCODER2(tran, TRANSA_MSA_MISC) +#define _TRANSA_MSA_MISC 0x60410 +#define _TRANSB_MSA_MISC 0x61410 +#define _TRANSC_MSA_MISC 0x62410 +#define _TRANS_EDP_MSA_MISC 0x6f410 +#define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC) #define TRANS_MSA_SYNC_CLK (1<<0) #define TRANS_MSA_6_BPC (0<<5) @@ -7364,7 +7409,7 @@ enum skl_disp_power_wells { #define TRANS_MSA_16_BPC (4<<5) /* LCPLL Control */ -#define LCPLL_CTL 0x130040 +#define LCPLL_CTL _MMIO(0x130040) #define LCPLL_PLL_DISABLE (1<<31) #define LCPLL_PLL_LOCK (1<<30) #define LCPLL_CLK_FREQ_MASK (3<<26) @@ -7384,7 +7429,7 @@ enum skl_disp_power_wells { */ /* CDCLK_CTL */ -#define CDCLK_CTL 0x46000 +#define CDCLK_CTL _MMIO(0x46000) #define CDCLK_FREQ_SEL_MASK (3<<26) #define CDCLK_FREQ_450_432 (0<<26) #define CDCLK_FREQ_540 (1<<26) @@ -7400,12 +7445,12 @@ enum skl_disp_power_wells { #define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16) /* LCPLL_CTL */ -#define LCPLL1_CTL 0x46010 -#define LCPLL2_CTL 0x46014 +#define LCPLL1_CTL _MMIO(0x46010) +#define LCPLL2_CTL _MMIO(0x46014) #define LCPLL_PLL_ENABLE (1<<31) /* DPLL control1 */ -#define DPLL_CTRL1 0x6C058 +#define DPLL_CTRL1 _MMIO(0x6C058) #define DPLL_CTRL1_HDMI_MODE(id) (1<<((id)*6+5)) #define 
DPLL_CTRL1_SSC(id) (1<<((id)*6+4)) #define DPLL_CTRL1_LINK_RATE_MASK(id) (7<<((id)*6+1)) @@ -7420,7 +7465,7 @@ enum skl_disp_power_wells { #define DPLL_CTRL1_LINK_RATE_2160 5 /* DPLL control2 */ -#define DPLL_CTRL2 0x6C05C +#define DPLL_CTRL2 _MMIO(0x6C05C) #define DPLL_CTRL2_DDI_CLK_OFF(port) (1<<((port)+15)) #define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3<<((port)*3+1)) #define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port)*3+1) @@ -7428,21 +7473,21 @@ enum skl_disp_power_wells { #define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1<<((port)*3)) /* DPLL Status */ -#define DPLL_STATUS 0x6C060 +#define DPLL_STATUS _MMIO(0x6C060) #define DPLL_LOCK(id) (1<<((id)*8)) /* DPLL cfg */ -#define DPLL1_CFGCR1 0x6C040 -#define DPLL2_CFGCR1 0x6C048 -#define DPLL3_CFGCR1 0x6C050 +#define _DPLL1_CFGCR1 0x6C040 +#define _DPLL2_CFGCR1 0x6C048 +#define _DPLL3_CFGCR1 0x6C050 #define DPLL_CFGCR1_FREQ_ENABLE (1<<31) #define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff<<9) #define DPLL_CFGCR1_DCO_FRACTION(x) ((x)<<9) #define DPLL_CFGCR1_DCO_INTEGER_MASK (0x1ff) -#define DPLL1_CFGCR2 0x6C044 -#define DPLL2_CFGCR2 0x6C04C -#define DPLL3_CFGCR2 0x6C054 +#define _DPLL1_CFGCR2 0x6C044 +#define _DPLL2_CFGCR2 0x6C04C +#define _DPLL3_CFGCR2 0x6C054 #define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff<<8) #define DPLL_CFGCR2_QDIV_RATIO(x) ((x)<<8) #define DPLL_CFGCR2_QDIV_MODE(x) ((x)<<7) @@ -7460,58 +7505,58 @@ enum skl_disp_power_wells { #define DPLL_CFGCR2_PDIV_7 (4<<2) #define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3) -#define DPLL_CFGCR1(id) (DPLL1_CFGCR1 + ((id) - SKL_DPLL1) * 8) -#define DPLL_CFGCR2(id) (DPLL1_CFGCR2 + ((id) - SKL_DPLL1) * 8) +#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1) +#define DPLL_CFGCR2(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2) /* BXT display engine PLL */ -#define BXT_DE_PLL_CTL 0x6d000 +#define BXT_DE_PLL_CTL _MMIO(0x6d000) #define BXT_DE_PLL_RATIO(x) (x) /* {60,65,100} * 19.2MHz */ #define BXT_DE_PLL_RATIO_MASK 0xff -#define BXT_DE_PLL_ENABLE 0x46070 +#define BXT_DE_PLL_ENABLE _MMIO(0x46070) #define BXT_DE_PLL_PLL_ENABLE (1 << 31) #define BXT_DE_PLL_LOCK (1 << 30) /* GEN9 DC */ -#define DC_STATE_EN 0x45504 +#define DC_STATE_EN _MMIO(0x45504) +#define DC_STATE_DISABLE 0 #define DC_STATE_EN_UPTO_DC5 (1<<0) #define DC_STATE_EN_DC9 (1<<3) #define DC_STATE_EN_UPTO_DC6 (2<<0) #define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3 -#define DC_STATE_DEBUG 0x45520 +#define DC_STATE_DEBUG _MMIO(0x45520) #define DC_STATE_DEBUG_MASK_MEMORY_UP (1<<1) /* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register, * since on HSW we can't write to it using I915_WRITE.
*/ -#define D_COMP_HSW (MCHBAR_MIRROR_BASE_SNB + 0x5F0C) -#define D_COMP_BDW 0x138144 +#define D_COMP_HSW _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5F0C) +#define D_COMP_BDW _MMIO(0x138144) #define D_COMP_RCOMP_IN_PROGRESS (1<<9) #define D_COMP_COMP_FORCE (1<<8) #define D_COMP_COMP_DISABLE (1<<0) /* Pipe WM_LINETIME - watermark line time */ -#define PIPE_WM_LINETIME_A 0x45270 -#define PIPE_WM_LINETIME_B 0x45274 -#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, PIPE_WM_LINETIME_A, \ - PIPE_WM_LINETIME_B) +#define _PIPE_WM_LINETIME_A 0x45270 +#define _PIPE_WM_LINETIME_B 0x45274 +#define PIPE_WM_LINETIME(pipe) _MMIO_PIPE(pipe, _PIPE_WM_LINETIME_A, _PIPE_WM_LINETIME_B) #define PIPE_WM_LINETIME_MASK (0x1ff) #define PIPE_WM_LINETIME_TIME(x) ((x)) #define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16) #define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16) /* SFUSE_STRAP */ -#define SFUSE_STRAP 0xc2014 +#define SFUSE_STRAP _MMIO(0xc2014) #define SFUSE_STRAP_FUSE_LOCK (1<<13) #define SFUSE_STRAP_DISPLAY_DISABLED (1<<7) #define SFUSE_STRAP_DDIB_DETECTED (1<<2) #define SFUSE_STRAP_DDIC_DETECTED (1<<1) #define SFUSE_STRAP_DDID_DETECTED (1<<0) -#define WM_MISC 0x45260 +#define WM_MISC _MMIO(0x45260) #define WM_MISC_DATA_PARTITION_5_6 (1 << 0) -#define WM_DBG 0x45280 +#define WM_DBG _MMIO(0x45280) #define WM_DBG_DISALLOW_MULTIPLE_LP (1<<0) #define WM_DBG_DISALLOW_MAXFIFO (1<<1) #define WM_DBG_DISALLOW_SPRITE (1<<2) @@ -7548,28 +7593,29 @@ enum skl_disp_power_wells { #define _PIPE_B_CSC_POSTOFF_ME 0x49144 #define _PIPE_B_CSC_POSTOFF_LO 0x49148 -#define PIPE_CSC_COEFF_RY_GY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RY_GY, _PIPE_B_CSC_COEFF_RY_GY) -#define PIPE_CSC_COEFF_BY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BY, _PIPE_B_CSC_COEFF_BY) -#define PIPE_CSC_COEFF_RU_GU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RU_GU, _PIPE_B_CSC_COEFF_RU_GU) -#define PIPE_CSC_COEFF_BU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BU, _PIPE_B_CSC_COEFF_BU) -#define PIPE_CSC_COEFF_RV_GV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RV_GV, _PIPE_B_CSC_COEFF_RV_GV) -#define PIPE_CSC_COEFF_BV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BV, _PIPE_B_CSC_COEFF_BV) -#define PIPE_CSC_MODE(pipe) _PIPE(pipe, _PIPE_A_CSC_MODE, _PIPE_B_CSC_MODE) -#define PIPE_CSC_PREOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_HI, _PIPE_B_CSC_PREOFF_HI) -#define PIPE_CSC_PREOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_ME, _PIPE_B_CSC_PREOFF_ME) -#define PIPE_CSC_PREOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_LO, _PIPE_B_CSC_PREOFF_LO) -#define PIPE_CSC_POSTOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_HI, _PIPE_B_CSC_POSTOFF_HI) -#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME) -#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO) +#define PIPE_CSC_COEFF_RY_GY(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_COEFF_RY_GY, _PIPE_B_CSC_COEFF_RY_GY) +#define PIPE_CSC_COEFF_BY(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_COEFF_BY, _PIPE_B_CSC_COEFF_BY) +#define PIPE_CSC_COEFF_RU_GU(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_COEFF_RU_GU, _PIPE_B_CSC_COEFF_RU_GU) +#define PIPE_CSC_COEFF_BU(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_COEFF_BU, _PIPE_B_CSC_COEFF_BU) +#define PIPE_CSC_COEFF_RV_GV(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_COEFF_RV_GV, _PIPE_B_CSC_COEFF_RV_GV) +#define PIPE_CSC_COEFF_BV(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_COEFF_BV, _PIPE_B_CSC_COEFF_BV) +#define PIPE_CSC_MODE(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_MODE, _PIPE_B_CSC_MODE) +#define PIPE_CSC_PREOFF_HI(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_PREOFF_HI, _PIPE_B_CSC_PREOFF_HI) +#define PIPE_CSC_PREOFF_ME(pipe) 
_MMIO_PIPE(pipe, _PIPE_A_CSC_PREOFF_ME, _PIPE_B_CSC_PREOFF_ME) +#define PIPE_CSC_PREOFF_LO(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_PREOFF_LO, _PIPE_B_CSC_PREOFF_LO) +#define PIPE_CSC_POSTOFF_HI(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_HI, _PIPE_B_CSC_POSTOFF_HI) +#define PIPE_CSC_POSTOFF_ME(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME) +#define PIPE_CSC_POSTOFF_LO(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO) /* MIPI DSI registers */ #define _MIPI_PORT(port, a, c) _PORT3(port, a, 0, c) /* ports A and C only */ +#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c)) /* BXT MIPI clock controls */ #define BXT_MAX_VAR_OUTPUT_KHZ 39500 -#define BXT_MIPI_CLOCK_CTL 0x46090 +#define BXT_MIPI_CLOCK_CTL _MMIO(0x46090) #define BXT_MIPI1_DIV_SHIFT 26 #define BXT_MIPI2_DIV_SHIFT 10 #define BXT_MIPI_DIV_SHIFT(port) \ @@ -7631,20 +7677,20 @@ enum skl_disp_power_wells { /* BXT MIPI mode configure */ #define _BXT_MIPIA_TRANS_HACTIVE 0x6B0F8 #define _BXT_MIPIC_TRANS_HACTIVE 0x6B8F8 -#define BXT_MIPI_TRANS_HACTIVE(tc) _MIPI_PORT(tc, \ +#define BXT_MIPI_TRANS_HACTIVE(tc) _MMIO_MIPI(tc, \ _BXT_MIPIA_TRANS_HACTIVE, _BXT_MIPIC_TRANS_HACTIVE) #define _BXT_MIPIA_TRANS_VACTIVE 0x6B0FC #define _BXT_MIPIC_TRANS_VACTIVE 0x6B8FC -#define BXT_MIPI_TRANS_VACTIVE(tc) _MIPI_PORT(tc, \ +#define BXT_MIPI_TRANS_VACTIVE(tc) _MMIO_MIPI(tc, \ _BXT_MIPIA_TRANS_VACTIVE, _BXT_MIPIC_TRANS_VACTIVE) #define _BXT_MIPIA_TRANS_VTOTAL 0x6B100 #define _BXT_MIPIC_TRANS_VTOTAL 0x6B900 -#define BXT_MIPI_TRANS_VTOTAL(tc) _MIPI_PORT(tc, \ +#define BXT_MIPI_TRANS_VTOTAL(tc) _MMIO_MIPI(tc, \ _BXT_MIPIA_TRANS_VTOTAL, _BXT_MIPIC_TRANS_VTOTAL) -#define BXT_DSI_PLL_CTL 0x161000 +#define BXT_DSI_PLL_CTL _MMIO(0x161000) #define BXT_DSI_PLL_PVD_RATIO_SHIFT 16 #define BXT_DSI_PLL_PVD_RATIO_MASK (3 << BXT_DSI_PLL_PVD_RATIO_SHIFT) #define BXT_DSI_PLL_PVD_RATIO_1 (1 << BXT_DSI_PLL_PVD_RATIO_SHIFT) @@ -7662,19 +7708,18 @@ enum skl_disp_power_wells { #define BXT_DSI_PLL_RATIO_MASK 0xFF #define BXT_REF_CLOCK_KHZ 19500 -#define BXT_DSI_PLL_ENABLE 0x46080 +#define BXT_DSI_PLL_ENABLE _MMIO(0x46080) #define BXT_DSI_PLL_DO_ENABLE (1 << 31) #define BXT_DSI_PLL_LOCKED (1 << 30) #define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190) #define _MIPIC_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700) -#define MIPI_PORT_CTRL(port) _MIPI_PORT(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL) +#define MIPI_PORT_CTRL(port) _MMIO_MIPI(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL) /* BXT port control */ #define _BXT_MIPIA_PORT_CTRL 0x6B0C0 #define _BXT_MIPIC_PORT_CTRL 0x6B8C0 -#define BXT_MIPI_PORT_CTRL(tc) _MIPI_PORT(tc, _BXT_MIPIA_PORT_CTRL, \ - _BXT_MIPIC_PORT_CTRL) +#define BXT_MIPI_PORT_CTRL(tc) _MMIO_MIPI(tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL) #define DPI_ENABLE (1 << 31) /* A + C */ #define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27 @@ -7718,8 +7763,7 @@ enum skl_disp_power_wells { #define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194) #define _MIPIC_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704) -#define MIPI_TEARING_CTRL(port) _MIPI_PORT(port, \ - _MIPIA_TEARING_CTRL, _MIPIC_TEARING_CTRL) +#define MIPI_TEARING_CTRL(port) _MMIO_MIPI(port, _MIPIA_TEARING_CTRL, _MIPIC_TEARING_CTRL) #define TEARING_EFFECT_DELAY_SHIFT 0 #define TEARING_EFFECT_DELAY_MASK (0xffff << 0) @@ -7730,8 +7774,7 @@ enum skl_disp_power_wells { #define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000) #define _MIPIC_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800) -#define MIPI_DEVICE_READY(port) _MIPI_PORT(port, _MIPIA_DEVICE_READY, \ - _MIPIC_DEVICE_READY) +#define 
MIPI_DEVICE_READY(port) _MMIO_MIPI(port, _MIPIA_DEVICE_READY, _MIPIC_DEVICE_READY) #define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */ #define ULPS_STATE_MASK (3 << 1) #define ULPS_STATE_ENTER (2 << 1) @@ -7741,12 +7784,10 @@ enum skl_disp_power_wells { #define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004) #define _MIPIC_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804) -#define MIPI_INTR_STAT(port) _MIPI_PORT(port, _MIPIA_INTR_STAT, \ - _MIPIC_INTR_STAT) +#define MIPI_INTR_STAT(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT, _MIPIC_INTR_STAT) #define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008) #define _MIPIC_INTR_EN (dev_priv->mipi_mmio_base + 0xb808) -#define MIPI_INTR_EN(port) _MIPI_PORT(port, _MIPIA_INTR_EN, \ - _MIPIC_INTR_EN) +#define MIPI_INTR_EN(port) _MMIO_MIPI(port, _MIPIA_INTR_EN, _MIPIC_INTR_EN) #define TEARING_EFFECT (1 << 31) #define SPL_PKT_SENT_INTERRUPT (1 << 30) #define GEN_READ_DATA_AVAIL (1 << 29) @@ -7782,8 +7823,7 @@ enum skl_disp_power_wells { #define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c) #define _MIPIC_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c) -#define MIPI_DSI_FUNC_PRG(port) _MIPI_PORT(port, _MIPIA_DSI_FUNC_PRG, \ - _MIPIC_DSI_FUNC_PRG) +#define MIPI_DSI_FUNC_PRG(port) _MMIO_MIPI(port, _MIPIA_DSI_FUNC_PRG, _MIPIC_DSI_FUNC_PRG) #define CMD_MODE_DATA_WIDTH_MASK (7 << 13) #define CMD_MODE_NOT_SUPPORTED (0 << 13) #define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13) @@ -7806,32 +7846,27 @@ enum skl_disp_power_wells { #define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010) #define _MIPIC_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810) -#define MIPI_HS_TX_TIMEOUT(port) _MIPI_PORT(port, _MIPIA_HS_TX_TIMEOUT, \ - _MIPIC_HS_TX_TIMEOUT) +#define MIPI_HS_TX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_HS_TX_TIMEOUT, _MIPIC_HS_TX_TIMEOUT) #define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff #define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014) #define _MIPIC_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814) -#define MIPI_LP_RX_TIMEOUT(port) _MIPI_PORT(port, _MIPIA_LP_RX_TIMEOUT, \ - _MIPIC_LP_RX_TIMEOUT) +#define MIPI_LP_RX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_LP_RX_TIMEOUT, _MIPIC_LP_RX_TIMEOUT) #define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff #define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018) #define _MIPIC_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818) -#define MIPI_TURN_AROUND_TIMEOUT(port) _MIPI_PORT(port, \ - _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT) +#define MIPI_TURN_AROUND_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT) #define TURN_AROUND_TIMEOUT_MASK 0x3f #define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c) #define _MIPIC_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c) -#define MIPI_DEVICE_RESET_TIMER(port) _MIPI_PORT(port, \ - _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER) +#define MIPI_DEVICE_RESET_TIMER(port) _MMIO_MIPI(port, _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER) #define DEVICE_RESET_TIMER_MASK 0xffff #define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020) #define _MIPIC_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820) -#define MIPI_DPI_RESOLUTION(port) _MIPI_PORT(port, _MIPIA_DPI_RESOLUTION, \ - _MIPIC_DPI_RESOLUTION) +#define MIPI_DPI_RESOLUTION(port) _MMIO_MIPI(port, _MIPIA_DPI_RESOLUTION, _MIPIC_DPI_RESOLUTION) #define VERTICAL_ADDRESS_SHIFT 16 #define VERTICAL_ADDRESS_MASK (0xffff << 16) #define HORIZONTAL_ADDRESS_SHIFT 0 @@ -7839,8 +7874,7 @@ enum skl_disp_power_wells { 
#define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024) #define _MIPIC_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824) -#define MIPI_DBI_FIFO_THROTTLE(port) _MIPI_PORT(port, \ - _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE) +#define MIPI_DBI_FIFO_THROTTLE(port) _MMIO_MIPI(port, _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE) #define DBI_FIFO_EMPTY_HALF (0 << 0) #define DBI_FIFO_EMPTY_QUARTER (1 << 0) #define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0) @@ -7848,50 +7882,41 @@ enum skl_disp_power_wells { /* regs below are bits 15:0 */ #define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028) #define _MIPIC_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828) -#define MIPI_HSYNC_PADDING_COUNT(port) _MIPI_PORT(port, \ - _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT) +#define MIPI_HSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT) #define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c) #define _MIPIC_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c) -#define MIPI_HBP_COUNT(port) _MIPI_PORT(port, _MIPIA_HBP_COUNT, \ - _MIPIC_HBP_COUNT) +#define MIPI_HBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HBP_COUNT, _MIPIC_HBP_COUNT) #define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030) #define _MIPIC_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830) -#define MIPI_HFP_COUNT(port) _MIPI_PORT(port, _MIPIA_HFP_COUNT, \ - _MIPIC_HFP_COUNT) +#define MIPI_HFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HFP_COUNT, _MIPIC_HFP_COUNT) #define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034) #define _MIPIC_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834) -#define MIPI_HACTIVE_AREA_COUNT(port) _MIPI_PORT(port, \ - _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT) +#define MIPI_HACTIVE_AREA_COUNT(port) _MMIO_MIPI(port, _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT) #define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038) #define _MIPIC_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838) -#define MIPI_VSYNC_PADDING_COUNT(port) _MIPI_PORT(port, \ - _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT) +#define MIPI_VSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT) #define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c) #define _MIPIC_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c) -#define MIPI_VBP_COUNT(port) _MIPI_PORT(port, _MIPIA_VBP_COUNT, \ - _MIPIC_VBP_COUNT) +#define MIPI_VBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VBP_COUNT, _MIPIC_VBP_COUNT) #define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040) #define _MIPIC_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840) -#define MIPI_VFP_COUNT(port) _MIPI_PORT(port, _MIPIA_VFP_COUNT, \ - _MIPIC_VFP_COUNT) +#define MIPI_VFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VFP_COUNT, _MIPIC_VFP_COUNT) #define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044) #define _MIPIC_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844) -#define MIPI_HIGH_LOW_SWITCH_COUNT(port) _MIPI_PORT(port, \ - _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT) +#define MIPI_HIGH_LOW_SWITCH_COUNT(port) _MMIO_MIPI(port, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT) /* regs above are bits 15:0 */ #define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048) #define _MIPIC_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848) -#define MIPI_DPI_CONTROL(port) _MIPI_PORT(port, _MIPIA_DPI_CONTROL, \ - _MIPIC_DPI_CONTROL) +#define MIPI_DPI_CONTROL(port) _MMIO_MIPI(port, _MIPIA_DPI_CONTROL, 
_MIPIC_DPI_CONTROL) #define DPI_LP_MODE (1 << 6) #define BACKLIGHT_OFF (1 << 5) #define BACKLIGHT_ON (1 << 4) @@ -7902,29 +7927,26 @@ enum skl_disp_power_wells { #define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c) #define _MIPIC_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c) -#define MIPI_DPI_DATA(port) _MIPI_PORT(port, _MIPIA_DPI_DATA, \ - _MIPIC_DPI_DATA) +#define MIPI_DPI_DATA(port) _MMIO_MIPI(port, _MIPIA_DPI_DATA, _MIPIC_DPI_DATA) #define COMMAND_BYTE_SHIFT 0 #define COMMAND_BYTE_MASK (0x3f << 0) #define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050) #define _MIPIC_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850) -#define MIPI_INIT_COUNT(port) _MIPI_PORT(port, _MIPIA_INIT_COUNT, \ - _MIPIC_INIT_COUNT) +#define MIPI_INIT_COUNT(port) _MMIO_MIPI(port, _MIPIA_INIT_COUNT, _MIPIC_INIT_COUNT) #define MASTER_INIT_TIMER_SHIFT 0 #define MASTER_INIT_TIMER_MASK (0xffff << 0) #define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054) #define _MIPIC_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854) -#define MIPI_MAX_RETURN_PKT_SIZE(port) _MIPI_PORT(port, \ +#define MIPI_MAX_RETURN_PKT_SIZE(port) _MMIO_MIPI(port, \ _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE) #define MAX_RETURN_PKT_SIZE_SHIFT 0 #define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0) #define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058) #define _MIPIC_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858) -#define MIPI_VIDEO_MODE_FORMAT(port) _MIPI_PORT(port, \ - _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT) +#define MIPI_VIDEO_MODE_FORMAT(port) _MMIO_MIPI(port, _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT) #define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4) #define DISABLE_VIDEO_BTA (1 << 3) #define IP_TG_CONFIG (1 << 2) @@ -7934,8 +7956,7 @@ enum skl_disp_power_wells { #define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c) #define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c) -#define MIPI_EOT_DISABLE(port) _MIPI_PORT(port, _MIPIA_EOT_DISABLE, \ - _MIPIC_EOT_DISABLE) +#define MIPI_EOT_DISABLE(port) _MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE) #define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7) #define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6) #define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5) @@ -7947,31 +7968,26 @@ enum skl_disp_power_wells { #define _MIPIA_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb060) #define _MIPIC_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860) -#define MIPI_LP_BYTECLK(port) _MIPI_PORT(port, _MIPIA_LP_BYTECLK, \ - _MIPIC_LP_BYTECLK) +#define MIPI_LP_BYTECLK(port) _MMIO_MIPI(port, _MIPIA_LP_BYTECLK, _MIPIC_LP_BYTECLK) #define LP_BYTECLK_SHIFT 0 #define LP_BYTECLK_MASK (0xffff << 0) /* bits 31:0 */ #define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064) #define _MIPIC_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864) -#define MIPI_LP_GEN_DATA(port) _MIPI_PORT(port, _MIPIA_LP_GEN_DATA, \ - _MIPIC_LP_GEN_DATA) +#define MIPI_LP_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_DATA, _MIPIC_LP_GEN_DATA) /* bits 31:0 */ #define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068) #define _MIPIC_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868) -#define MIPI_HS_GEN_DATA(port) _MIPI_PORT(port, _MIPIA_HS_GEN_DATA, \ - _MIPIC_HS_GEN_DATA) +#define MIPI_HS_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_DATA, _MIPIC_HS_GEN_DATA) #define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c) #define _MIPIC_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c) -#define MIPI_LP_GEN_CTRL(port) _MIPI_PORT(port, _MIPIA_LP_GEN_CTRL, \ - 
_MIPIC_LP_GEN_CTRL) +#define MIPI_LP_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_CTRL, _MIPIC_LP_GEN_CTRL) #define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070) #define _MIPIC_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870) -#define MIPI_HS_GEN_CTRL(port) _MIPI_PORT(port, _MIPIA_HS_GEN_CTRL, \ - _MIPIC_HS_GEN_CTRL) +#define MIPI_HS_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_CTRL, _MIPIC_HS_GEN_CTRL) #define LONG_PACKET_WORD_COUNT_SHIFT 8 #define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8) #define SHORT_PACKET_PARAM_SHIFT 8 @@ -7984,8 +8000,7 @@ enum skl_disp_power_wells { #define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074) #define _MIPIC_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874) -#define MIPI_GEN_FIFO_STAT(port) _MIPI_PORT(port, _MIPIA_GEN_FIFO_STAT, \ - _MIPIC_GEN_FIFO_STAT) +#define MIPI_GEN_FIFO_STAT(port) _MMIO_MIPI(port, _MIPIA_GEN_FIFO_STAT, _MIPIC_GEN_FIFO_STAT) #define DPI_FIFO_EMPTY (1 << 28) #define DBI_FIFO_EMPTY (1 << 27) #define LP_CTRL_FIFO_EMPTY (1 << 26) @@ -8003,16 +8018,14 @@ enum skl_disp_power_wells { #define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078) #define _MIPIC_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878) -#define MIPI_HS_LP_DBI_ENABLE(port) _MIPI_PORT(port, \ - _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE) +#define MIPI_HS_LP_DBI_ENABLE(port) _MMIO_MIPI(port, _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE) #define DBI_HS_LP_MODE_MASK (1 << 0) #define DBI_LP_MODE (1 << 0) #define DBI_HS_MODE (0 << 0) #define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080) #define _MIPIC_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880) -#define MIPI_DPHY_PARAM(port) _MIPI_PORT(port, _MIPIA_DPHY_PARAM, \ - _MIPIC_DPHY_PARAM) +#define MIPI_DPHY_PARAM(port) _MMIO_MIPI(port, _MIPIA_DPHY_PARAM, _MIPIC_DPHY_PARAM) #define EXIT_ZERO_COUNT_SHIFT 24 #define EXIT_ZERO_COUNT_MASK (0x3f << 24) #define TRAIL_COUNT_SHIFT 16 @@ -8025,15 +8038,11 @@ enum skl_disp_power_wells { /* bits 31:0 */ #define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084) #define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884) -#define MIPI_DBI_BW_CTRL(port) _MIPI_PORT(port, _MIPIA_DBI_BW_CTRL, \ - _MIPIC_DBI_BW_CTRL) - -#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \ - + 0xb088) -#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \ - + 0xb888) -#define MIPI_CLK_LANE_SWITCH_TIME_CNT(port) _MIPI_PORT(port, \ - _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT) +#define MIPI_DBI_BW_CTRL(port) _MMIO_MIPI(port, _MIPIA_DBI_BW_CTRL, _MIPIC_DBI_BW_CTRL) + +#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base + 0xb088) +#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base + 0xb888) +#define MIPI_CLK_LANE_SWITCH_TIME_CNT(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT) #define LP_HS_SSW_CNT_SHIFT 16 #define LP_HS_SSW_CNT_MASK (0xffff << 16) #define HS_LP_PWR_SW_CNT_SHIFT 0 @@ -8041,19 +8050,16 @@ enum skl_disp_power_wells { #define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c) #define _MIPIC_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c) -#define MIPI_STOP_STATE_STALL(port) _MIPI_PORT(port, \ - _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL) +#define MIPI_STOP_STATE_STALL(port) _MMIO_MIPI(port, _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL) #define STOP_STATE_STALL_COUNTER_SHIFT 0 #define STOP_STATE_STALL_COUNTER_MASK (0xff << 0) #define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090) #define 
_MIPIC_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890) -#define MIPI_INTR_STAT_REG_1(port) _MIPI_PORT(port, \ - _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1) +#define MIPI_INTR_STAT_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1) #define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094) #define _MIPIC_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894) -#define MIPI_INTR_EN_REG_1(port) _MIPI_PORT(port, _MIPIA_INTR_EN_REG_1, \ - _MIPIC_INTR_EN_REG_1) +#define MIPI_INTR_EN_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_EN_REG_1, _MIPIC_INTR_EN_REG_1) #define RX_CONTENTION_DETECTED (1 << 0) /* XXX: only pipe A ?!? */ @@ -8073,8 +8079,7 @@ enum skl_disp_power_wells { #define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104) #define _MIPIC_CTRL (dev_priv->mipi_mmio_base + 0xb904) -#define MIPI_CTRL(port) _MIPI_PORT(port, _MIPIA_CTRL, \ - _MIPIC_CTRL) +#define MIPI_CTRL(port) _MMIO_MIPI(port, _MIPIA_CTRL, _MIPIC_CTRL) #define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */ #define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5) #define ESCAPE_CLOCK_DIVIDER_1 (0 << 5) @@ -8093,23 +8098,20 @@ enum skl_disp_power_wells { #define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108) #define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908) -#define MIPI_DATA_ADDRESS(port) _MIPI_PORT(port, _MIPIA_DATA_ADDRESS, \ - _MIPIC_DATA_ADDRESS) +#define MIPI_DATA_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_DATA_ADDRESS, _MIPIC_DATA_ADDRESS) #define DATA_MEM_ADDRESS_SHIFT 5 #define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5) #define DATA_VALID (1 << 0) #define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c) #define _MIPIC_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c) -#define MIPI_DATA_LENGTH(port) _MIPI_PORT(port, _MIPIA_DATA_LENGTH, \ - _MIPIC_DATA_LENGTH) +#define MIPI_DATA_LENGTH(port) _MMIO_MIPI(port, _MIPIA_DATA_LENGTH, _MIPIC_DATA_LENGTH) #define DATA_LENGTH_SHIFT 0 #define DATA_LENGTH_MASK (0xfffff << 0) #define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110) #define _MIPIC_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910) -#define MIPI_COMMAND_ADDRESS(port) _MIPI_PORT(port, \ - _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS) +#define MIPI_COMMAND_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS) #define COMMAND_MEM_ADDRESS_SHIFT 5 #define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5) #define AUTO_PWG_ENABLE (1 << 2) @@ -8118,21 +8120,17 @@ enum skl_disp_power_wells { #define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114) #define _MIPIC_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914) -#define MIPI_COMMAND_LENGTH(port) _MIPI_PORT(port, _MIPIA_COMMAND_LENGTH, \ - _MIPIC_COMMAND_LENGTH) +#define MIPI_COMMAND_LENGTH(port) _MMIO_MIPI(port, _MIPIA_COMMAND_LENGTH, _MIPIC_COMMAND_LENGTH) #define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */ #define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n))) #define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118) #define _MIPIC_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918) -#define MIPI_READ_DATA_RETURN(port, n) \ - (_MIPI_PORT(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) \ - + 4 * (n)) /* n: 0...7 */ +#define MIPI_READ_DATA_RETURN(port, n) _MMIO(_MIPI_PORT(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */ #define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138) #define _MIPIC_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938) -#define MIPI_READ_DATA_VALID(port) _MIPI_PORT(port, \ - _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
+#define MIPI_READ_DATA_VALID(port) _MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID) #define READ_DATA_VALID(n) (1 << (n)) /* For UMS only (deprecated): */ @@ -8140,12 +8138,12 @@ enum skl_disp_power_wells { #define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800) /* MOCS (Memory Object Control State) registers */ -#define GEN9_LNCFCMOCS0 0xb020 /* L3 Cache Control base */ +#define GEN9_LNCFCMOCS(i) _MMIO(0xb020 + (i) * 4) /* L3 Cache Control */ -#define GEN9_GFX_MOCS_0 0xc800 /* Graphics MOCS base register*/ -#define GEN9_MFX0_MOCS_0 0xc900 /* Media 0 MOCS base register*/ -#define GEN9_MFX1_MOCS_0 0xca00 /* Media 1 MOCS base register*/ -#define GEN9_VEBOX_MOCS_0 0xcb00 /* Video MOCS base register*/ -#define GEN9_BLT_MOCS_0 0xcc00 /* Blitter MOCS base register*/ +#define GEN9_GFX_MOCS(i) _MMIO(0xc800 + (i) * 4) /* Graphics MOCS registers */ +#define GEN9_MFX0_MOCS(i) _MMIO(0xc900 + (i) * 4) /* Media 0 MOCS registers */ +#define GEN9_MFX1_MOCS(i) _MMIO(0xca00 + (i) * 4) /* Media 1 MOCS registers */ +#define GEN9_VEBOX_MOCS(i) _MMIO(0xcb00 + (i) * 4) /* Video MOCS registers */ +#define GEN9_BLT_MOCS(i) _MMIO(0xcc00 + (i) * 4) /* Blitter MOCS registers */ #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 50ce9ce2b269..f929c61f0fa2 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -35,7 +35,8 @@ #define dev_to_drm_minor(d) dev_get_drvdata((d)) #ifdef CONFIG_PM -static u32 calc_residency(struct drm_device *dev, const u32 reg) +static u32 calc_residency(struct drm_device *dev, + i915_reg_t reg) { struct drm_i915_private *dev_priv = dev->dev_private; u64 raw_time; /* 32b value may overflow during fixed point math */ diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 04fe8491c8b6..52b2d409945d 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -664,7 +664,7 @@ TRACE_EVENT(i915_flip_complete, ); TRACE_EVENT_CONDITION(i915_reg_rw, - TP_PROTO(bool write, u32 reg, u64 val, int len, bool trace), + TP_PROTO(bool write, i915_reg_t reg, u64 val, int len, bool trace), TP_ARGS(write, reg, val, len, trace), @@ -679,7 +679,7 @@ TRACE_EVENT_CONDITION(i915_reg_rw, TP_fast_assign( __entry->val = (u64)val; - __entry->reg = reg; + __entry->reg = i915_mmio_reg_offset(reg); __entry->write = write; __entry->len = len; ), diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index 5eee75bff170..dea7429be4d0 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c @@ -69,13 +69,13 @@ void i915_check_vgpu(struct drm_device *dev) if (!IS_HASWELL(dev)) return; - magic = readq(dev_priv->regs + vgtif_reg(magic)); + magic = __raw_i915_read64(dev_priv, vgtif_reg(magic)); if (magic != VGT_MAGIC) return; version = INTEL_VGT_IF_VERSION_ENCODE( - readw(dev_priv->regs + vgtif_reg(version_major)), - readw(dev_priv->regs + vgtif_reg(version_minor))); + __raw_i915_read16(dev_priv, vgtif_reg(version_major)), + __raw_i915_read16(dev_priv, vgtif_reg(version_minor))); if (version != INTEL_VGT_IF_VERSION) { DRM_INFO("VGT interface version mismatch!\n"); return; diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h index 21c97f44d637..3c83b47b5f69 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.h +++ b/drivers/gpu/drm/i915/i915_vgpu.h @@ -92,14 +92,10 @@ struct vgt_if { uint32_t g2v_notify; uint32_t rsv6[7]; - uint32_t pdp0_lo; - uint32_t pdp0_hi; - uint32_t 
pdp1_lo; - uint32_t pdp1_hi; - uint32_t pdp2_lo; - uint32_t pdp2_hi; - uint32_t pdp3_lo; - uint32_t pdp3_hi; + struct { + uint32_t lo; + uint32_t hi; + } pdp[4]; uint32_t execlist_context_descriptor_lo; uint32_t execlist_context_descriptor_hi; @@ -108,7 +104,7 @@ struct vgt_if { } __packed; #define vgtif_reg(x) \ - (VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x) + _MMIO((VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x)) /* vGPU display status to be used by the host side */ #define VGT_DRV_DISPLAY_NOT_READY 0 diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index f1975f267710..643f342de33b 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c @@ -94,6 +94,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc) __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base); crtc_state->update_pipe = false; + crtc_state->disable_lp_wm = false; return &crtc_state->base; } @@ -205,8 +206,6 @@ int intel_atomic_setup_scalers(struct drm_device *dev, * but since this plane is unchanged just do the * minimum required validation. */ - if (plane->type == DRM_PLANE_TYPE_PRIMARY) - intel_crtc->atomic.wait_for_flips = true; crtc_state->base.planes_changed = true; } diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c index a11980696595..c6bb0fc1edfb 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/intel_atomic_plane.c @@ -84,6 +84,7 @@ intel_plane_duplicate_state(struct drm_plane *plane) state = &intel_state->base; __drm_atomic_helper_plane_duplicate_state(plane, state); + intel_state->wait_req = NULL; return state; } @@ -100,6 +101,7 @@ void intel_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *state) { + WARN_ON(state && to_intel_plane_state(state)->wait_req); drm_atomic_helper_plane_destroy_state(plane, state); } diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 4dccd9b003a1..de465f2876d1 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -161,9 +161,9 @@ static bool audio_rate_need_prog(struct intel_crtc *crtc, } static bool intel_eld_uptodate(struct drm_connector *connector, - int reg_eldv, uint32_t bits_eldv, - int reg_elda, uint32_t bits_elda, - int reg_edid) + i915_reg_t reg_eldv, uint32_t bits_eldv, + i915_reg_t reg_elda, uint32_t bits_elda, + i915_reg_t reg_edid) { struct drm_i915_private *dev_priv = connector->dev->dev_private; uint8_t *eld = connector->eld; @@ -364,8 +364,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder) enum port port = intel_dig_port->port; enum pipe pipe = intel_crtc->pipe; uint32_t tmp, eldv; - int aud_config; - int aud_cntrl_st2; + i915_reg_t aud_config, aud_cntrl_st2; DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n", port_name(port), pipe_name(pipe)); @@ -416,10 +415,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector, uint32_t eldv; uint32_t tmp; int len, i; - int hdmiw_hdmiedid; - int aud_config; - int aud_cntl_st; - int aud_cntrl_st2; + i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2; DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n", port_name(port), pipe_name(pipe), drm_eld_size(eld)); @@ -525,6 +521,10 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder) dev_priv->display.audio_codec_enable(connector, intel_encoder, adjusted_mode); + mutex_lock(&dev_priv->av_mutex); + 
intel_dig_port->audio_connector = connector; + mutex_unlock(&dev_priv->av_mutex); + if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port); } @@ -548,6 +548,10 @@ void intel_audio_codec_disable(struct intel_encoder *intel_encoder) if (dev_priv->display.audio_codec_disable) dev_priv->display.audio_codec_disable(intel_encoder); + mutex_lock(&dev_priv->av_mutex); + intel_dig_port->audio_connector = NULL; + mutex_unlock(&dev_priv->av_mutex); + if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port); } @@ -591,7 +595,7 @@ static void i915_audio_component_codec_wake_override(struct device *dev, struct drm_i915_private *dev_priv = dev_to_i915(dev); u32 tmp; - if (!IS_SKYLAKE(dev_priv)) + if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv)) return; /* @@ -632,44 +636,40 @@ static int i915_audio_component_sync_audio_rate(struct device *dev, int port, int rate) { struct drm_i915_private *dev_priv = dev_to_i915(dev); - struct drm_device *drm_dev = dev_priv->dev; struct intel_encoder *intel_encoder; - struct intel_digital_port *intel_dig_port; struct intel_crtc *crtc; struct drm_display_mode *mode; struct i915_audio_component *acomp = dev_priv->audio_component; - enum pipe pipe = -1; + enum pipe pipe = INVALID_PIPE; u32 tmp; int n; + int err = 0; - /* HSW, BDW SKL need this fix */ + /* HSW, BDW, SKL, KBL need this fix */ if (!IS_SKYLAKE(dev_priv) && - !IS_BROADWELL(dev_priv) && - !IS_HASWELL(dev_priv)) + !IS_KABYLAKE(dev_priv) && + !IS_BROADWELL(dev_priv) && + !IS_HASWELL(dev_priv)) return 0; mutex_lock(&dev_priv->av_mutex); /* 1. get the pipe */ - for_each_intel_encoder(drm_dev, intel_encoder) { - if (intel_encoder->type != INTEL_OUTPUT_HDMI) - continue; - intel_dig_port = enc_to_dig_port(&intel_encoder->base); - if (port == intel_dig_port->port) { - crtc = to_intel_crtc(intel_encoder->base.crtc); - if (!crtc) { - DRM_DEBUG_KMS("%s: crtc is NULL\n", __func__); - continue; - } - pipe = crtc->pipe; - break; - } + intel_encoder = dev_priv->dig_port_map[port]; + /* intel_encoder might be NULL for DP MST */ + if (!intel_encoder || !intel_encoder->base.crtc || + intel_encoder->type != INTEL_OUTPUT_HDMI) { + DRM_DEBUG_KMS("no valid port %c\n", port_name(port)); + err = -ENODEV; + goto unlock; } - + crtc = to_intel_crtc(intel_encoder->base.crtc); + pipe = crtc->pipe; if (pipe == INVALID_PIPE) { DRM_DEBUG_KMS("no pipe for the port %c\n", port_name(port)); - mutex_unlock(&dev_priv->av_mutex); - return -ENODEV; + err = -ENODEV; + goto unlock; } + DRM_DEBUG_KMS("pipe %c connects port %c\n", pipe_name(pipe), port_name(port)); mode = &crtc->config->base.adjusted_mode; @@ -682,8 +682,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev, tmp = I915_READ(HSW_AUD_CFG(pipe)); tmp &= ~AUD_CONFIG_N_PROG_ENABLE; I915_WRITE(HSW_AUD_CFG(pipe), tmp); - mutex_unlock(&dev_priv->av_mutex); - return 0; + goto unlock; } n = audio_config_get_n(mode, rate); @@ -693,8 +692,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev, tmp = I915_READ(HSW_AUD_CFG(pipe)); tmp &= ~AUD_CONFIG_N_PROG_ENABLE; I915_WRITE(HSW_AUD_CFG(pipe), tmp); - mutex_unlock(&dev_priv->av_mutex); - return 0; + goto unlock; } /* 3. 
set the N/CTS/M */ @@ -702,8 +700,37 @@ static int i915_audio_component_sync_audio_rate(struct device *dev, tmp = audio_config_setup_n_reg(n, tmp); I915_WRITE(HSW_AUD_CFG(pipe), tmp); + unlock: mutex_unlock(&dev_priv->av_mutex); - return 0; + return err; +} + +static int i915_audio_component_get_eld(struct device *dev, int port, + bool *enabled, + unsigned char *buf, int max_bytes) +{ + struct drm_i915_private *dev_priv = dev_to_i915(dev); + struct intel_encoder *intel_encoder; + struct intel_digital_port *intel_dig_port; + const u8 *eld; + int ret = -EINVAL; + + mutex_lock(&dev_priv->av_mutex); + intel_encoder = dev_priv->dig_port_map[port]; + /* intel_encoder might be NULL for DP MST */ + if (intel_encoder) { + ret = 0; + intel_dig_port = enc_to_dig_port(&intel_encoder->base); + *enabled = intel_dig_port->audio_connector != NULL; + if (*enabled) { + eld = intel_dig_port->audio_connector->eld; + ret = drm_eld_size(eld); + memcpy(buf, eld, min(max_bytes, ret)); + } + } + + mutex_unlock(&dev_priv->av_mutex); + return ret; } static const struct i915_audio_component_ops i915_audio_component_ops = { @@ -713,6 +740,7 @@ static const struct i915_audio_component_ops i915_audio_component_ops = { .codec_wake_override = i915_audio_component_codec_wake_override, .get_cdclk_freq = i915_audio_component_get_cdclk_freq, .sync_audio_rate = i915_audio_component_sync_audio_rate, + .get_eld = i915_audio_component_get_eld, }; static int i915_audio_component_bind(struct device *i915_dev, diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 6a2c76e367a5..27b3e610e8f0 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -50,7 +50,7 @@ struct intel_crt { * encoder's enable/disable callbacks */ struct intel_connector *connector; bool force_hotplug_required; - u32 adpa_reg; + i915_reg_t adpa_reg; }; static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder) @@ -480,12 +480,8 @@ intel_crt_load_detect(struct intel_crt *crt) uint32_t vsample; uint32_t vblank, vblank_start, vblank_end; uint32_t dsl; - uint32_t bclrpat_reg; - uint32_t vtotal_reg; - uint32_t vblank_reg; - uint32_t vsync_reg; - uint32_t pipeconf_reg; - uint32_t pipe_dsl_reg; + i915_reg_t bclrpat_reg, vtotal_reg, + vblank_reg, vsync_reg, pipeconf_reg, pipe_dsl_reg; uint8_t st00; enum drm_connector_status status; @@ -518,7 +514,7 @@ intel_crt_load_detect(struct intel_crt *crt) /* Wait for next Vblank to substitue * border color for Color info */ intel_wait_for_vblank(dev, pipe); - st00 = I915_READ8(VGA_MSR_WRITE); + st00 = I915_READ8(_VGA_MSR_WRITE); status = ((st00 & (1 << 4)) != 0) ? 
connector_status_connected : connector_status_disconnected; @@ -563,7 +559,7 @@ intel_crt_load_detect(struct intel_crt *crt) do { count++; /* Read the ST00 VGA status register */ - st00 = I915_READ8(VGA_MSR_WRITE); + st00 = I915_READ8(_VGA_MSR_WRITE); if (st00 & (1 << 4)) detect++; } while ((I915_READ(pipe_dsl_reg) == dsl)); diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 9e530a739354..6c6a6695e99c 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -47,21 +47,10 @@ MODULE_FIRMWARE(I915_CSR_SKL); MODULE_FIRMWARE(I915_CSR_BXT); -/* -* SKL CSR registers for DC5 and DC6 -*/ -#define CSR_PROGRAM(i) (0x80000 + (i) * 4) -#define CSR_SSP_BASE_ADDR_GEN9 0x00002FC0 -#define CSR_HTP_ADDR_SKL 0x00500034 -#define CSR_SSP_BASE 0x8F074 -#define CSR_HTP_SKL 0x8F004 -#define CSR_LAST_WRITE 0x8F034 -#define CSR_LAST_WRITE_VALUE 0xc003b400 -/* MMIO address range for CSR program (0x80000 - 0x82FFF) */ +#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23) + #define CSR_MAX_FW_SIZE 0x2FFF #define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF -#define CSR_MMIO_START_RANGE 0x80000 -#define CSR_MMIO_END_RANGE 0x8FFFF struct intel_css_header { /* 0x09 for DMC */ @@ -178,166 +167,134 @@ struct stepping_info { }; static const struct stepping_info skl_stepping_info[] = { - {'A', '0'}, {'B', '0'}, {'C', '0'}, - {'D', '0'}, {'E', '0'}, {'F', '0'}, - {'G', '0'}, {'H', '0'}, {'I', '0'} + {'A', '0'}, {'B', '0'}, {'C', '0'}, + {'D', '0'}, {'E', '0'}, {'F', '0'}, + {'G', '0'}, {'H', '0'}, {'I', '0'} }; -static struct stepping_info bxt_stepping_info[] = { +static const struct stepping_info bxt_stepping_info[] = { {'A', '0'}, {'A', '1'}, {'A', '2'}, {'B', '0'}, {'B', '1'}, {'B', '2'} }; -static char intel_get_stepping(struct drm_device *dev) -{ - if (IS_SKYLAKE(dev) && (dev->pdev->revision < - ARRAY_SIZE(skl_stepping_info))) - return skl_stepping_info[dev->pdev->revision].stepping; - else if (IS_BROXTON(dev) && (dev->pdev->revision < - ARRAY_SIZE(bxt_stepping_info))) - return bxt_stepping_info[dev->pdev->revision].stepping; - else - return -ENODATA; -} - -static char intel_get_substepping(struct drm_device *dev) +static const struct stepping_info *intel_get_stepping_info(struct drm_device *dev) { - if (IS_SKYLAKE(dev) && (dev->pdev->revision < - ARRAY_SIZE(skl_stepping_info))) - return skl_stepping_info[dev->pdev->revision].substepping; - else if (IS_BROXTON(dev) && (dev->pdev->revision < - ARRAY_SIZE(bxt_stepping_info))) - return bxt_stepping_info[dev->pdev->revision].substepping; - else - return -ENODATA; -} - -/** - * intel_csr_load_status_get() - to get firmware loading status. - * @dev_priv: i915 device. - * - * This function helps to get the firmware loading status. - * - * Return: Firmware loading status. - */ -enum csr_state intel_csr_load_status_get(struct drm_i915_private *dev_priv) -{ - enum csr_state state; + const struct stepping_info *si; + unsigned int size; + + if (IS_SKYLAKE(dev)) { + size = ARRAY_SIZE(skl_stepping_info); + si = skl_stepping_info; + } else if (IS_BROXTON(dev)) { + size = ARRAY_SIZE(bxt_stepping_info); + si = bxt_stepping_info; + } else { + return NULL; + } - mutex_lock(&dev_priv->csr_lock); - state = dev_priv->csr.state; - mutex_unlock(&dev_priv->csr_lock); + if (INTEL_REVID(dev) < size) + return si + INTEL_REVID(dev); - return state; -} - -/** - * intel_csr_load_status_set() - help to set firmware loading status. - * @dev_priv: i915 device. - * @state: enumeration of firmware loading status. - * - * Set the firmware loading status. 
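[Annotation] In the intel_csr.c rework above, the two char-returning helpers intel_get_stepping() and intel_get_substepping() are folded into a single intel_get_stepping_info() that returns a pointer into a per-platform table, or NULL for an unknown revision. That gives callers one validity check and avoids stuffing -ENODATA into a char, which is not representable where plain char is unsigned. A standalone sketch of the shape, with illustrative table contents:

#include <stddef.h>

struct stepping_info {
        char stepping;
        char substepping;
};

static const struct stepping_info skl_steppings[] = {
        { 'A', '0' }, { 'B', '0' }, { 'C', '0' },
};

/* Return the table entry for a hardware revision, or NULL if unknown. */
static const struct stepping_info *get_stepping_info(unsigned int revid)
{
        if (revid < sizeof(skl_steppings) / sizeof(skl_steppings[0]))
                return &skl_steppings[revid];
        return NULL;
}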
- */ -void intel_csr_load_status_set(struct drm_i915_private *dev_priv, - enum csr_state state) -{ - mutex_lock(&dev_priv->csr_lock); - dev_priv->csr.state = state; - mutex_unlock(&dev_priv->csr_lock); + return NULL; } /** * intel_csr_load_program() - write the firmware from memory to register. - * @dev: drm device. + * @dev_priv: i915 drm device. * * CSR firmware is read from a .bin file and kept in internal memory one time. * Everytime display comes back from low power state this function is called to * copy the firmware from internal memory to registers. */ -void intel_csr_load_program(struct drm_device *dev) +void intel_csr_load_program(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 *payload = dev_priv->csr.dmc_payload; uint32_t i, fw_size; - if (!IS_GEN9(dev)) { + if (!IS_GEN9(dev_priv)) { DRM_ERROR("No CSR support available for this platform\n"); return; } - /* - * FIXME: Firmware gets lost on S3/S4, but not when entering system - * standby or suspend-to-idle (which is just like forced runtime pm). - * Unfortunately the ACPI subsystem doesn't yet give us a way to - * differentiate this, hence figure it out with this hack. - */ - if (I915_READ(CSR_PROGRAM(0))) + if (!dev_priv->csr.dmc_payload) { + DRM_ERROR("Tried to program CSR with empty payload\n"); return; + } - mutex_lock(&dev_priv->csr_lock); fw_size = dev_priv->csr.dmc_fw_size; for (i = 0; i < fw_size; i++) I915_WRITE(CSR_PROGRAM(i), payload[i]); for (i = 0; i < dev_priv->csr.mmio_count; i++) { I915_WRITE(dev_priv->csr.mmioaddr[i], - dev_priv->csr.mmiodata[i]); + dev_priv->csr.mmiodata[i]); } - - dev_priv->csr.state = FW_LOADED; - mutex_unlock(&dev_priv->csr_lock); } -static void finish_csr_load(const struct firmware *fw, void *context) +static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, + const struct firmware *fw) { - struct drm_i915_private *dev_priv = context; struct drm_device *dev = dev_priv->dev; struct intel_css_header *css_header; struct intel_package_header *package_header; struct intel_dmc_header *dmc_header; struct intel_csr *csr = &dev_priv->csr; - char stepping = intel_get_stepping(dev); - char substepping = intel_get_substepping(dev); + const struct stepping_info *stepping_info = intel_get_stepping_info(dev); + char stepping, substepping; uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes; uint32_t i; uint32_t *dmc_payload; - bool fw_loaded = false; - if (!fw) { - i915_firmware_load_error_print(csr->fw_path, 0); - goto out; - } + if (!fw) + return NULL; - if ((stepping == -ENODATA) || (substepping == -ENODATA)) { + if (!stepping_info) { DRM_ERROR("Unknown stepping info, firmware loading failed\n"); - goto out; + return NULL; } + stepping = stepping_info->stepping; + substepping = stepping_info->substepping; + /* Extract CSS Header information*/ css_header = (struct intel_css_header *)fw->data; if (sizeof(struct intel_css_header) != - (css_header->header_len * 4)) { + (css_header->header_len * 4)) { DRM_ERROR("Firmware has wrong CSS header length %u bytes\n", - (css_header->header_len * 4)); - goto out; + (css_header->header_len * 4)); + return NULL; } + + csr->version = css_header->version; + + if (IS_SKYLAKE(dev) && csr->version < SKL_CSR_VERSION_REQUIRED) { + DRM_INFO("Refusing to load old Skylake DMC firmware v%u.%u," + " please upgrade to v%u.%u or later" + " [https://01.org/linuxgraphics/intel-linux-graphics-firmwares].\n", + CSR_VERSION_MAJOR(csr->version), + CSR_VERSION_MINOR(csr->version), + 
CSR_VERSION_MAJOR(SKL_CSR_VERSION_REQUIRED), + CSR_VERSION_MINOR(SKL_CSR_VERSION_REQUIRED)); + return NULL; + } + readcount += sizeof(struct intel_css_header); /* Extract Package Header information*/ package_header = (struct intel_package_header *) - &fw->data[readcount]; + &fw->data[readcount]; if (sizeof(struct intel_package_header) != - (package_header->header_len * 4)) { + (package_header->header_len * 4)) { DRM_ERROR("Firmware has wrong package header length %u bytes\n", - (package_header->header_len * 4)); - goto out; + (package_header->header_len * 4)); + return NULL; } readcount += sizeof(struct intel_package_header); /* Search for dmc_offset to find firware binary. */ for (i = 0; i < package_header->num_entries; i++) { if (package_header->fw_info[i].substepping == '*' && - stepping == package_header->fw_info[i].stepping) { + stepping == package_header->fw_info[i].stepping) { dmc_offset = package_header->fw_info[i].offset; break; } else if (stepping == package_header->fw_info[i].stepping && @@ -345,12 +302,12 @@ static void finish_csr_load(const struct firmware *fw, void *context) dmc_offset = package_header->fw_info[i].offset; break; } else if (package_header->fw_info[i].stepping == '*' && - package_header->fw_info[i].substepping == '*') + package_header->fw_info[i].substepping == '*') dmc_offset = package_header->fw_info[i].offset; } if (dmc_offset == CSR_DEFAULT_FW_OFFSET) { DRM_ERROR("Firmware not supported for %c stepping\n", stepping); - goto out; + return NULL; } readcount += dmc_offset; @@ -358,26 +315,26 @@ static void finish_csr_load(const struct firmware *fw, void *context) dmc_header = (struct intel_dmc_header *)&fw->data[readcount]; if (sizeof(struct intel_dmc_header) != (dmc_header->header_len)) { DRM_ERROR("Firmware has wrong dmc header length %u bytes\n", - (dmc_header->header_len)); - goto out; + (dmc_header->header_len)); + return NULL; } readcount += sizeof(struct intel_dmc_header); /* Cache the dmc header info. 
*/ if (dmc_header->mmio_count > ARRAY_SIZE(csr->mmioaddr)) { DRM_ERROR("Firmware has wrong mmio count %u\n", - dmc_header->mmio_count); - goto out; + dmc_header->mmio_count); + return NULL; } csr->mmio_count = dmc_header->mmio_count; for (i = 0; i < dmc_header->mmio_count; i++) { if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE || - dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) { + dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) { DRM_ERROR(" Firmware has wrong mmio address 0x%x\n", - dmc_header->mmioaddr[i]); - goto out; + dmc_header->mmioaddr[i]); + return NULL; } - csr->mmioaddr[i] = dmc_header->mmioaddr[i]; + csr->mmioaddr[i] = _MMIO(dmc_header->mmioaddr[i]); csr->mmiodata[i] = dmc_header->mmiodata[i]; } @@ -385,56 +342,80 @@ static void finish_csr_load(const struct firmware *fw, void *context) nbytes = dmc_header->fw_size * 4; if (nbytes > CSR_MAX_FW_SIZE) { DRM_ERROR("CSR firmware too big (%u) bytes\n", nbytes); - goto out; + return NULL; } csr->dmc_fw_size = dmc_header->fw_size; - csr->dmc_payload = kmalloc(nbytes, GFP_KERNEL); - if (!csr->dmc_payload) { + dmc_payload = kmalloc(nbytes, GFP_KERNEL); + if (!dmc_payload) { DRM_ERROR("Memory allocation failed for dmc payload\n"); - goto out; + return NULL; } - dmc_payload = csr->dmc_payload; memcpy(dmc_payload, &fw->data[readcount], nbytes); + return dmc_payload; +} + +static void csr_load_work_fn(struct work_struct *work) +{ + struct drm_i915_private *dev_priv; + struct intel_csr *csr; + const struct firmware *fw; + int ret; + + dev_priv = container_of(work, typeof(*dev_priv), csr.work); + csr = &dev_priv->csr; + + ret = request_firmware(&fw, dev_priv->csr.fw_path, + &dev_priv->dev->pdev->dev); + if (!fw) + goto out; + + dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw); + if (!dev_priv->csr.dmc_payload) + goto out; + /* load csr program during system boot, as needed for DC states */ - intel_csr_load_program(dev); - fw_loaded = true; + intel_csr_load_program(dev_priv); - DRM_DEBUG_KMS("Finished loading %s\n", dev_priv->csr.fw_path); out: - if (fw_loaded) - intel_runtime_pm_put(dev_priv); - else - intel_csr_load_status_set(dev_priv, FW_FAILED); + if (dev_priv->csr.dmc_payload) { + intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + + DRM_INFO("Finished loading %s (v%u.%u)\n", + dev_priv->csr.fw_path, + CSR_VERSION_MAJOR(csr->version), + CSR_VERSION_MINOR(csr->version)); + } else { + DRM_ERROR("Failed to load DMC firmware, disabling rpm\n"); + } release_firmware(fw); } /** * intel_csr_ucode_init() - initialize the firmware loading. - * @dev: drm device. + * @dev_priv: i915 drm device. * * This function is called at the time of loading the display driver to read * firmware from a .bin file and copied into a internal memory. */ -void intel_csr_ucode_init(struct drm_device *dev) +void intel_csr_ucode_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_csr *csr = &dev_priv->csr; - int ret; - if (!HAS_CSR(dev)) + INIT_WORK(&dev_priv->csr.work, csr_load_work_fn); + + if (!HAS_CSR(dev_priv)) return; - if (IS_SKYLAKE(dev)) + if (IS_SKYLAKE(dev_priv)) csr->fw_path = I915_CSR_SKL; else if (IS_BROXTON(dev_priv)) csr->fw_path = I915_CSR_BXT; else { DRM_ERROR("Unexpected: no known CSR firmware for platform\n"); - intel_csr_load_status_set(dev_priv, FW_FAILED); return; } @@ -444,43 +425,24 @@ void intel_csr_ucode_init(struct drm_device *dev) * Obtain a runtime pm reference, until CSR is loaded, * to avoid entering runtime-suspend. 
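 *
 * [Annotation: the load path above also changes shape, not just names. The
 *  request_firmware_nowait() callback is replaced by a driver-owned work item
 *  (csr_load_work_fn) around the synchronous request_firmware(), and
 *  parse_csr_fw() returns the payload or NULL, so every failure collapses
 *  onto the single out: label. The runtime-pm reference described in this
 *  comment becomes an explicit intel_display_power_get(dev_priv,
 *  POWER_DOMAIN_INIT), released only once a DMC payload was actually parsed;
 *  on failure it is deliberately kept, which is what "disabling rpm" in the
 *  error message refers to.]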
*/ - intel_runtime_pm_get(dev_priv); - - /* CSR supported for platform, load firmware */ - ret = request_firmware_nowait(THIS_MODULE, true, csr->fw_path, - &dev_priv->dev->pdev->dev, - GFP_KERNEL, dev_priv, - finish_csr_load); - if (ret) { - i915_firmware_load_error_print(csr->fw_path, ret); - intel_csr_load_status_set(dev_priv, FW_FAILED); - } + intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + + schedule_work(&dev_priv->csr.work); } /** * intel_csr_ucode_fini() - unload the CSR firmware. - * @dev: drm device. + * @dev_priv: i915 drm device. * * Firmmware unloading includes freeing the internal momory and reset the * firmware loading status. */ -void intel_csr_ucode_fini(struct drm_device *dev) +void intel_csr_ucode_fini(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (!HAS_CSR(dev)) + if (!HAS_CSR(dev_priv)) return; - intel_csr_load_status_set(dev_priv, FW_FAILED); - kfree(dev_priv->csr.dmc_payload); -} + flush_work(&dev_priv->csr.work); -void assert_csr_loaded(struct drm_i915_private *dev_priv) -{ - WARN_ONCE(intel_csr_load_status_get(dev_priv) != FW_LOADED, - "CSR is not loaded.\n"); - WARN_ONCE(!I915_READ(CSR_PROGRAM(0)), - "CSR program storage start is NULL\n"); - WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n"); - WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n"); + kfree(dev_priv->csr.dmc_payload); } diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index a6752a61d99f..59deb0d85533 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -133,12 +133,12 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = { { 0x00002016, 0x000000A0, 0x0 }, { 0x00005012, 0x0000009B, 0x0 }, { 0x00007011, 0x00000088, 0x0 }, - { 0x00009010, 0x000000C7, 0x0 }, + { 0x80009010, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */ { 0x00002016, 0x0000009B, 0x0 }, { 0x00005012, 0x00000088, 0x0 }, - { 0x00007011, 0x000000C7, 0x0 }, + { 0x80007011, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */ { 0x00002016, 0x000000DF, 0x0 }, - { 0x00005012, 0x000000C7, 0x0 }, + { 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */ }; /* Skylake U */ @@ -146,12 +146,12 @@ static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = { { 0x0000201B, 0x000000A2, 0x0 }, { 0x00005012, 0x00000088, 0x0 }, { 0x00007011, 0x00000087, 0x0 }, - { 0x80009010, 0x000000C7, 0x1 }, /* Uses I_boost level 0x1 */ + { 0x80009010, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */ { 0x0000201B, 0x0000009D, 0x0 }, - { 0x00005012, 0x000000C7, 0x0 }, - { 0x00007011, 0x000000C7, 0x0 }, + { 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */ + { 0x80007011, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */ { 0x00002016, 0x00000088, 0x0 }, - { 0x00005012, 0x000000C7, 0x0 }, + { 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */ }; /* Skylake Y */ @@ -159,12 +159,12 @@ static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = { { 0x00000018, 0x000000A2, 0x0 }, { 0x00005012, 0x00000088, 0x0 }, { 0x00007011, 0x00000087, 0x0 }, - { 0x80009010, 0x000000C7, 0x3 }, /* Uses I_boost level 0x3 */ + { 0x80009010, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */ { 0x00000018, 0x0000009D, 0x0 }, - { 0x00005012, 0x000000C7, 0x0 }, - { 0x00007011, 0x000000C7, 0x0 }, + { 0x80005012, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */ + { 0x80007011, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */ { 0x00000018, 0x00000088, 0x0 }, - { 0x00005012, 0x000000C7, 0x0 }, + { 0x80005012, 0x000000C0, 0x3 }, 
/* Uses I_boost level 0x3 */ }; /* @@ -345,7 +345,7 @@ enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) static bool intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port) { - return intel_dig_port->hdmi.hdmi_reg; + return i915_mmio_reg_valid(intel_dig_port->hdmi.hdmi_reg); } static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev, @@ -448,7 +448,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, bxt_ddi_vswing_sequence(dev, hdmi_level, port, INTEL_OUTPUT_HDMI); return; - } else if (IS_SKYLAKE(dev)) { + } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { ddi_translations_fdi = NULL; ddi_translations_dp = skl_get_buf_trans_dp(dev, &n_dp_entries); @@ -576,7 +576,7 @@ void intel_prepare_ddi(struct drm_device *dev) static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, enum port port) { - uint32_t reg = DDI_BUF_CTL(port); + i915_reg_t reg = DDI_BUF_CTL(port); int i; for (i = 0; i < 16; i++) { @@ -931,7 +931,8 @@ static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget, /* Otherwise a < c && b >= d, do nothing */ } -static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, int reg) +static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, + i915_reg_t reg) { int refclk = LC_FREQ; int n, p, r; @@ -967,7 +968,7 @@ static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, int reg) static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv, uint32_t dpll) { - uint32_t cfgcr1_reg, cfgcr2_reg; + i915_reg_t cfgcr1_reg, cfgcr2_reg; uint32_t cfgcr1_val, cfgcr2_val; uint32_t p0, p1, p2, dco_freq; @@ -1112,10 +1113,10 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder, link_clock = 270000; break; case PORT_CLK_SEL_WRPLL1: - link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1); + link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(0)); break; case PORT_CLK_SEL_WRPLL2: - link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2); + link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(1)); break; case PORT_CLK_SEL_SPLL: pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK; @@ -1184,7 +1185,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder, if (INTEL_INFO(dev)->gen <= 8) hsw_ddi_clock_get(encoder, pipe_config); - else if (IS_SKYLAKE(dev)) + else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) skl_ddi_clock_get(encoder, pipe_config); else if (IS_BROXTON(dev)) bxt_ddi_clock_get(encoder, pipe_config); @@ -1780,7 +1781,7 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc, struct intel_encoder *intel_encoder = intel_ddi_get_crtc_new_encoder(crtc_state); - if (IS_SKYLAKE(dev)) + if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) return skl_ddi_pll_select(intel_crtc, crtc_state, intel_encoder); else if (IS_BROXTON(dev)) @@ -1942,7 +1943,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc) void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder) { - uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); + i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); uint32_t val = I915_READ(reg); val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC); @@ -2097,21 +2098,21 @@ static void skl_ddi_set_iboost(struct drm_device *dev, u32 level, iboost = dp_iboost; } else { ddi_translations = skl_get_buf_trans_dp(dev, &n_entries); - iboost = ddi_translations[port].i_boost; + iboost = ddi_translations[level].i_boost; } } else if (type == INTEL_OUTPUT_EDP) { 
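/*
 * [Annotation] The three one-line changes in skl_ddi_set_iboost() are a real
 * bug fix riding along with the register-type conversion: the I_boost value
 * lives in the buffer-translation table, which is indexed by the
 * vswing/pre-emphasis level, so ddi_translations[port].i_boost read the
 * wrong entry whenever the port number differed from the selected level.
 * Each of the DP, eDP and HDMI branches now indexes by 'level'.
 */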
if (dp_iboost) { iboost = dp_iboost; } else { ddi_translations = skl_get_buf_trans_edp(dev, &n_entries); - iboost = ddi_translations[port].i_boost; + iboost = ddi_translations[level].i_boost; } } else if (type == INTEL_OUTPUT_HDMI) { if (hdmi_iboost) { iboost = hdmi_iboost; } else { ddi_translations = skl_get_buf_trans_hdmi(dev, &n_entries); - iboost = ddi_translations[port].i_boost; + iboost = ddi_translations[level].i_boost; } } else { return; @@ -2263,7 +2264,7 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp) level = translate_signal_level(signal_levels); - if (IS_SKYLAKE(dev)) + if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) skl_ddi_set_iboost(dev, level, port, encoder->type); else if (IS_BROXTON(dev)) bxt_ddi_vswing_sequence(dev, level, port, encoder->type); @@ -2271,30 +2272,21 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp) return DDI_BUF_TRANS_SELECT(level); } -static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) +void intel_ddi_clk_select(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config) { - struct drm_encoder *encoder = &intel_encoder->base; - struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *crtc = to_intel_crtc(encoder->crtc); - enum port port = intel_ddi_get_encoder_port(intel_encoder); - int type = intel_encoder->type; - int hdmi_level; - - if (type == INTEL_OUTPUT_EDP) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - intel_edp_panel_on(intel_dp); - } + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = intel_ddi_get_encoder_port(encoder); - if (IS_SKYLAKE(dev)) { - uint32_t dpll = crtc->config->ddi_pll_sel; + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + uint32_t dpll = pipe_config->ddi_pll_sel; uint32_t val; /* * DPLL0 is used for eDP and is the only "private" DPLL (as * opposed to shared) on SKL */ - if (type == INTEL_OUTPUT_EDP) { + if (encoder->type == INTEL_OUTPUT_EDP) { WARN_ON(dpll != SKL_DPLL0); val = I915_READ(DPLL_CTRL1); @@ -2302,7 +2294,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) | DPLL_CTRL1_SSC(dpll) | DPLL_CTRL1_LINK_RATE_MASK(dpll)); - val |= crtc->config->dpll_hw_state.ctrl1 << (dpll * 6); + val |= pipe_config->dpll_hw_state.ctrl1 << (dpll * 6); I915_WRITE(DPLL_CTRL1, val); POSTING_READ(DPLL_CTRL1); @@ -2318,10 +2310,28 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) I915_WRITE(DPLL_CTRL2, val); - } else if (INTEL_INFO(dev)->gen < 9) { - WARN_ON(crtc->config->ddi_pll_sel == PORT_CLK_SEL_NONE); - I915_WRITE(PORT_CLK_SEL(port), crtc->config->ddi_pll_sel); + } else if (INTEL_INFO(dev_priv)->gen < 9) { + WARN_ON(pipe_config->ddi_pll_sel == PORT_CLK_SEL_NONE); + I915_WRITE(PORT_CLK_SEL(port), pipe_config->ddi_pll_sel); } +} + +static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) +{ + struct drm_encoder *encoder = &intel_encoder->base; + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *crtc = to_intel_crtc(encoder->crtc); + enum port port = intel_ddi_get_encoder_port(intel_encoder); + int type = intel_encoder->type; + int hdmi_level; + + if (type == INTEL_OUTPUT_EDP) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + intel_edp_panel_on(intel_dp); + } + + intel_ddi_clk_select(intel_encoder, crtc->config); if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = 
enc_to_intel_dp(encoder); @@ -2381,7 +2391,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder) intel_edp_panel_off(intel_dp); } - if (IS_SKYLAKE(dev)) + if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) | DPLL_CTRL2_DDI_CLK_OFF(port))); else if (INTEL_INFO(dev)->gen < 9) @@ -2553,7 +2563,7 @@ static const char * const skl_ddi_pll_names[] = { }; struct skl_dpll_regs { - u32 ctl, cfgcr1, cfgcr2; + i915_reg_t ctl, cfgcr1, cfgcr2; }; /* this array is indexed by the *shared* pll id */ @@ -2566,13 +2576,13 @@ static const struct skl_dpll_regs skl_dpll_regs[3] = { }, { /* DPLL 2 */ - .ctl = WRPLL_CTL1, + .ctl = WRPLL_CTL(0), .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2), .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2), }, { /* DPLL 3 */ - .ctl = WRPLL_CTL2, + .ctl = WRPLL_CTL(1), .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3), .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3), }, @@ -2992,22 +3002,22 @@ void intel_ddi_pll_init(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; uint32_t val = I915_READ(LCPLL_CTL); - if (IS_SKYLAKE(dev)) + if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) skl_shared_dplls_init(dev_priv); else if (IS_BROXTON(dev)) bxt_shared_dplls_init(dev_priv); else hsw_shared_dplls_init(dev_priv); - if (IS_SKYLAKE(dev)) { + if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { int cdclk_freq; cdclk_freq = dev_priv->display.get_display_clock_speed(dev); dev_priv->skl_boot_cdclk = cdclk_freq; + if (skl_sanitize_cdclk(dev_priv)) + DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n"); if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) DRM_ERROR("LCPLL1 is disabled\n"); - else - intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); } else if (IS_BROXTON(dev)) { broxton_init_cdclk(dev); broxton_ddi_phy_init(dev); @@ -3026,11 +3036,11 @@ void intel_ddi_pll_init(struct drm_device *dev) } } -void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder) +void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) { - struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); - struct intel_dp *intel_dp = &intel_dig_port->dp; - struct drm_i915_private *dev_priv = encoder->dev->dev_private; + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = + to_i915(intel_dig_port->base.base.dev); enum port port = intel_dig_port->port; uint32_t val; bool wait = false; @@ -3285,10 +3295,25 @@ void intel_ddi_init(struct drm_device *dev, enum port port) intel_encoder->get_config = intel_ddi_get_config; intel_dig_port->port = port; + dev_priv->dig_port_map[port] = intel_encoder; intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES); + /* + * Bspec says that DDI_A_4_LANES is the only supported configuration + * for Broxton. Yet some BIOS fail to set this bit on port A if eDP + * wasn't lit up at boot. Force this bit on in our internal + * configuration so that we use the proper lane count for our + * calculations. + */ + if (IS_BROXTON(dev) && port == PORT_A) { + if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) { + DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n"); + intel_dig_port->saved_port_bits |= DDI_A_4_LANES; + } + } + intel_encoder->type = INTEL_OUTPUT_UNKNOWN; intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); intel_encoder->cloneable = 0; @@ -3302,8 +3327,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port) * On BXT A0/A1, sw needs to activate DDIA HPD logic and * interrupts to check the external panel connection. 
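 *
 * [Annotation: the revision check below is also converted from an open-coded
 *  INTEL_REVID(dev) < BXT_REVID_B0 comparison to the IS_BXT_REVID(dev, 0,
 *  BXT_REVID_A1) helper, which names both ends of the range; it expands to
 *  roughly IS_BROXTON(dev) && INTEL_REVID(dev) >= (since) &&
 *  INTEL_REVID(dev) <= (until).]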
*/ - if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0) - && port == PORT_B) + if (IS_BXT_REVID(dev, 0, BXT_REVID_A1) && port == PORT_B) dev_priv->hotplug.irq_port[PORT_A] = intel_dig_port; else dev_priv->hotplug.irq_port[port] = intel_dig_port; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 71860f8680f9..696f7543d264 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1095,7 +1095,7 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe) { struct drm_i915_private *dev_priv = dev->dev_private; - u32 reg = PIPEDSL(pipe); + i915_reg_t reg = PIPEDSL(pipe); u32 line1, line2; u32 line_mask; @@ -1135,7 +1135,7 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc) enum pipe pipe = crtc->pipe; if (INTEL_INFO(dev)->gen >= 4) { - int reg = PIPECONF(cpu_transcoder); + i915_reg_t reg = PIPECONF(cpu_transcoder); /* Wait for the Pipe State to go off */ if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, @@ -1285,7 +1285,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe) { struct drm_device *dev = dev_priv->dev; - int pp_reg; + i915_reg_t pp_reg; u32 val; enum pipe panel_pipe = PIPE_A; bool locked = true; @@ -1480,8 +1480,7 @@ static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, return false; if (HAS_PCH_CPT(dev_priv->dev)) { - u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe); - u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg); + u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe)); if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) return false; } else if (IS_CHERRYVIEW(dev_priv->dev)) { @@ -1545,12 +1544,13 @@ static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv, } static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, - enum pipe pipe, int reg, u32 port_sel) + enum pipe pipe, i915_reg_t reg, + u32 port_sel) { u32 val = I915_READ(reg); I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val), "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", - reg, pipe_name(pipe)); + i915_mmio_reg_offset(reg), pipe_name(pipe)); I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0 && (val & DP_PIPEB_SELECT), @@ -1558,12 +1558,12 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, } static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, - enum pipe pipe, int reg) + enum pipe pipe, i915_reg_t reg) { u32 val = I915_READ(reg); I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val), "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", - reg, pipe_name(pipe)); + i915_mmio_reg_offset(reg), pipe_name(pipe)); I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0 && (val & SDVO_PIPE_B_SELECT), @@ -1599,7 +1599,7 @@ static void vlv_enable_pll(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - int reg = DPLL(crtc->pipe); + i915_reg_t reg = DPLL(crtc->pipe); u32 dpll = pipe_config->dpll_hw_state.dpll; assert_pipe_disabled(dev_priv, crtc->pipe); @@ -1688,7 +1688,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - int reg = DPLL(crtc->pipe); + i915_reg_t reg = DPLL(crtc->pipe); u32 dpll = crtc->config->dpll_hw_state.dpll; assert_pipe_disabled(dev_priv, crtc->pipe); @@ -1837,7 
+1837,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, unsigned int expected_mask) { u32 port_mask; - int dpll_reg; + i915_reg_t dpll_reg; switch (dport->port) { case PORT_B: @@ -1962,7 +1962,8 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, struct drm_device *dev = dev_priv->dev; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - uint32_t reg, val, pipeconf_val; + i915_reg_t reg; + uint32_t val, pipeconf_val; /* PCH only available on ILK+ */ BUG_ON(!HAS_PCH_SPLIT(dev)); @@ -2051,7 +2052,8 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, enum pipe pipe) { struct drm_device *dev = dev_priv->dev; - uint32_t reg, val; + i915_reg_t reg; + uint32_t val; /* FDI relies on the transcoder */ assert_fdi_tx_disabled(dev_priv, pipe); @@ -2068,7 +2070,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe)); - if (!HAS_PCH_IBX(dev)) { + if (HAS_PCH_CPT(dev)) { /* Workaround: Clear the timing override chicken bit again. */ reg = TRANS_CHICKEN2(pipe); val = I915_READ(reg); @@ -2106,10 +2108,9 @@ static void intel_enable_pipe(struct intel_crtc *crtc) struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; enum pipe pipe = crtc->pipe; - enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, - pipe); + enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; enum pipe pch_transcoder; - int reg; + i915_reg_t reg; u32 val; DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe)); @@ -2170,7 +2171,7 @@ static void intel_disable_pipe(struct intel_crtc *crtc) struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; enum pipe pipe = crtc->pipe; - int reg; + i915_reg_t reg; u32 val; DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe)); @@ -2269,20 +2270,20 @@ intel_fb_align_height(struct drm_device *dev, unsigned int height, fb_format_modifier, 0)); } -static int +static void intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb, const struct drm_plane_state *plane_state) { - struct intel_rotation_info *info = &view->rotation_info; + struct intel_rotation_info *info = &view->params.rotation_info; unsigned int tile_height, tile_pitch; *view = i915_ggtt_view_normal; if (!plane_state) - return 0; + return; if (!intel_rotation_90_or_270(plane_state->rotation)) - return 0; + return; *view = i915_ggtt_view_rotated; @@ -2309,8 +2310,6 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb, info->size_uv = info->width_pages_uv * info->height_pages_uv * PAGE_SIZE; } - - return 0; } static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv) @@ -2329,9 +2328,7 @@ static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv) int intel_pin_and_fence_fb_obj(struct drm_plane *plane, struct drm_framebuffer *fb, - const struct drm_plane_state *plane_state, - struct intel_engine_cs *pipelined, - struct drm_i915_gem_request **pipelined_request) + const struct drm_plane_state *plane_state) { struct drm_device *dev = fb->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -2366,9 +2363,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane, return -EINVAL; } - ret = intel_fill_fb_ggtt_view(&view, fb, plane_state); 
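/*
 * [Annotation] intel_fill_fb_ggtt_view() is switched from int to void in
 * this series because it has no failure path; the 'if (ret) return ret;'
 * removed here, and the WARN_ONCE in intel_unpin_fb_obj() below, were dead
 * error handling that the void return type lets both call sites drop.
 */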
- if (ret) - return ret; + intel_fill_fb_ggtt_view(&view, fb, plane_state); /* Note that the w/a also requires 64 PTE of padding following the * bo. We currently fill all unused PTE with the shadow page and so @@ -2387,11 +2382,10 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane, */ intel_runtime_pm_get(dev_priv); - dev_priv->mm.interruptible = false; - ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined, - pipelined_request, &view); + ret = i915_gem_object_pin_to_display_plane(obj, alignment, + &view); if (ret) - goto err_interruptible; + goto err_pm; /* Install a fence for tiled scan-out. Pre-i965 always needs a * fence, whereas 965+ only requires a fence if using @@ -2417,14 +2411,12 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane, i915_gem_object_pin_fence(obj); } - dev_priv->mm.interruptible = true; intel_runtime_pm_put(dev_priv); return 0; err_unpin: i915_gem_object_unpin_from_display_plane(obj, &view); -err_interruptible: - dev_priv->mm.interruptible = true; +err_pm: intel_runtime_pm_put(dev_priv); return ret; } @@ -2434,12 +2426,10 @@ static void intel_unpin_fb_obj(struct drm_framebuffer *fb, { struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct i915_ggtt_view view; - int ret; WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex)); - ret = intel_fill_fb_ggtt_view(&view, fb, plane_state); - WARN_ONCE(ret, "Couldn't get view from plane state!"); + intel_fill_fb_ggtt_view(&view, fb, plane_state); if (view.type == I915_GGTT_VIEW_NORMAL) i915_gem_object_unpin_fence(obj); @@ -2680,7 +2670,7 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc, int plane = intel_crtc->plane; unsigned long linear_offset; u32 dspcntr; - u32 reg = DSPCNTR(plane); + i915_reg_t reg = DSPCNTR(plane); int pixel_size; if (!visible || !fb) { @@ -2810,7 +2800,7 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc, int plane = intel_crtc->plane; unsigned long linear_offset; u32 dspcntr; - u32 reg = DSPCNTR(plane); + i915_reg_t reg = DSPCNTR(plane); int pixel_size; if (!visible || !fb) { @@ -2935,30 +2925,32 @@ u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier, } } -unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane, - struct drm_i915_gem_object *obj, - unsigned int plane) +u32 intel_plane_obj_offset(struct intel_plane *intel_plane, + struct drm_i915_gem_object *obj, + unsigned int plane) { - const struct i915_ggtt_view *view = &i915_ggtt_view_normal; + struct i915_ggtt_view view; struct i915_vma *vma; - unsigned char *offset; + u64 offset; - if (intel_rotation_90_or_270(intel_plane->base.state->rotation)) - view = &i915_ggtt_view_rotated; + intel_fill_fb_ggtt_view(&view, intel_plane->base.fb, + intel_plane->base.state); - vma = i915_gem_obj_to_ggtt_view(obj, view); + vma = i915_gem_obj_to_ggtt_view(obj, &view); if (WARN(!vma, "ggtt vma for display object not found! 
(view=%u)\n", - view->type)) + view.type)) return -1; - offset = (unsigned char *)vma->node.start; + offset = vma->node.start; if (plane == 1) { - offset += vma->ggtt_view.rotation_info.uv_start_page * + offset += vma->ggtt_view.params.rotation_info.uv_start_page * PAGE_SIZE; } - return (unsigned long)offset; + WARN_ON(upper_32_bits(offset)); + + return lower_32_bits(offset); } static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) @@ -3084,7 +3076,7 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc, u32 tile_height, plane_offset, plane_size; unsigned int rotation; int x_offset, y_offset; - unsigned long surf_addr; + u32 surf_addr; struct intel_crtc_state *crtc_state = intel_crtc->config; struct intel_plane_state *plane_state; int src_x = 0, src_y = 0, src_w = 0, src_h = 0; @@ -3212,10 +3204,9 @@ static void intel_update_primary_planes(struct drm_device *dev) struct intel_plane_state *plane_state; drm_modeset_lock_crtc(crtc, &plane->base); - plane_state = to_intel_plane_state(plane->base.state); - if (plane_state->base.fb) + if (crtc->state->active && plane_state->base.fb) plane->commit_plane(&plane->base, plane_state); drm_modeset_unlock_crtc(crtc); @@ -3291,32 +3282,6 @@ void intel_finish_reset(struct drm_device *dev) drm_modeset_unlock_all(dev); } -static void -intel_finish_fb(struct drm_framebuffer *old_fb) -{ - struct drm_i915_gem_object *obj = intel_fb_obj(old_fb); - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - bool was_interruptible = dev_priv->mm.interruptible; - int ret; - - /* Big Hammer, we also need to ensure that any pending - * MI_WAIT_FOR_EVENT inside a user batch buffer on the - * current scanout is retired before unpinning the old - * framebuffer. Note that we rely on userspace rendering - * into the buffer attached to the pipe they are waiting - * on. If not, userspace generates a GPU hang with IPEHR - * point to the MI_WAIT_FOR_EVENT. - * - * This should only fail upon a hung GPU, in which case we - * can safely continue. 
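 *
 * [Annotation: this "big hammer" stall is removed because rendering is now
 *  waited for through the per-plane request: intel_plane_state gained a
 *  wait_req field earlier in this diff, and the atomic commit path waits on
 *  that instead of blocking at flip time. Correspondingly,
 *  intel_crtc_wait_for_pending_flips() below becomes interruptible and
 *  returns an error rather than only warning on timeout.]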
- */ - dev_priv->mm.interruptible = false; - ret = i915_gem_object_wait_rendering(obj, true); - dev_priv->mm.interruptible = was_interruptible; - - WARN_ON(ret); -} - static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -3386,7 +3351,8 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - u32 reg, temp; + i915_reg_t reg; + u32 temp; /* enable normal train */ reg = FDI_TX_CTL(pipe); @@ -3428,7 +3394,8 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - u32 reg, temp, tries; + i915_reg_t reg; + u32 temp, tries; /* FDI needs bits from pipe first */ assert_pipe_enabled(dev_priv, pipe); @@ -3528,7 +3495,8 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - u32 reg, temp, i, retry; + i915_reg_t reg; + u32 temp, i, retry; /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit for train result */ @@ -3660,7 +3628,8 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - u32 reg, temp, i, j; + i915_reg_t reg; + u32 temp, i, j; /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit for train result */ @@ -3777,8 +3746,8 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc) struct drm_device *dev = intel_crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; int pipe = intel_crtc->pipe; - u32 reg, temp; - + i915_reg_t reg; + u32 temp; /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ reg = FDI_RX_CTL(pipe); @@ -3814,7 +3783,8 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) struct drm_device *dev = intel_crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; int pipe = intel_crtc->pipe; - u32 reg, temp; + i915_reg_t reg; + u32 temp; /* Switch from PCDclk to Rawclk */ reg = FDI_RX_CTL(pipe); @@ -3844,7 +3814,8 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - u32 reg, temp; + i915_reg_t reg; + u32 temp; /* disable CPU FDI tx and PCH FDI rx */ reg = FDI_TX_CTL(pipe); @@ -3937,15 +3908,23 @@ static void page_flip_completed(struct intel_crtc *intel_crtc) work->pending_flip_obj); } -void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) +static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; + long ret; WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue)); - if (WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue, - !intel_crtc_has_pending_flip(crtc), - 60*HZ) == 0)) { + + ret = wait_event_interruptible_timeout( + dev_priv->pending_flip_queue, + !intel_crtc_has_pending_flip(crtc), + 60*HZ); + + if (ret < 0) + return ret; + + if (ret == 0) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); spin_lock_irq(&dev->event_lock); @@ -3956,11 +3935,7 @@ void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) 
spin_unlock_irq(&dev->event_lock); } - if (crtc->primary->fb) { - mutex_lock(&dev->struct_mutex); - intel_finish_fb(crtc->primary->fb); - mutex_unlock(&dev->struct_mutex); - } + return 0; } /* Program iCLKIP clock to the desired frequency */ @@ -4120,6 +4095,22 @@ static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc) } } +/* Return which DP Port should be selected for Transcoder DP control */ +static enum port +intel_trans_dp_port_sel(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct intel_encoder *encoder; + + for_each_encoder_on_crtc(dev, crtc, encoder) { + if (encoder->type == INTEL_OUTPUT_DISPLAYPORT || + encoder->type == INTEL_OUTPUT_EDP) + return enc_to_dig_port(&encoder->base)->port; + } + + return -1; +} + /* * Enable PCH resources required for PCH ports: * - PCH PLLs @@ -4134,7 +4125,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - u32 reg, temp; + u32 temp; assert_pch_transcoder_disabled(dev_priv, pipe); @@ -4181,8 +4172,10 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) /* For PCH DP, enable TRANS_DP_CTL */ if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) { + const struct drm_display_mode *adjusted_mode = + &intel_crtc->config->base.adjusted_mode; u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; - reg = TRANS_DP_CTL(pipe); + i915_reg_t reg = TRANS_DP_CTL(pipe); temp = I915_READ(reg); temp &= ~(TRANS_DP_PORT_SEL_MASK | TRANS_DP_SYNC_MASK | @@ -4190,19 +4183,19 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) temp |= TRANS_DP_OUTPUT_ENABLE; temp |= bpc << 9; /* same format but at 11:9 */ - if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) + if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; - if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) + if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; switch (intel_trans_dp_port_sel(crtc)) { - case PCH_DP_B: + case PORT_B: temp |= TRANS_DP_PORT_SEL_B; break; - case PCH_DP_C: + case PORT_C: temp |= TRANS_DP_PORT_SEL_C; break; - case PCH_DP_D: + case PORT_D: temp |= TRANS_DP_PORT_SEL_D; break; default: @@ -4342,7 +4335,7 @@ static void intel_shared_dpll_commit(struct drm_atomic_state *state) static void cpt_verify_modeset(struct drm_device *dev, int pipe) { struct drm_i915_private *dev_priv = dev->dev_private; - int dslreg = PIPEDSL(pipe); + i915_reg_t dslreg = PIPEDSL(pipe); u32 temp; temp = I915_READ(dslreg); @@ -4652,7 +4645,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc) } for (i = 0; i < 256; i++) { - u32 palreg; + i915_reg_t palreg; if (HAS_GMCH_DISPLAY(dev)) palreg = PALETTE(pipe, i); @@ -4731,9 +4724,9 @@ intel_post_enable_primary(struct drm_crtc *crtc) if (IS_GEN2(dev)) intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); - /* Underruns don't raise interrupts, so check manually. */ - if (HAS_GMCH_DISPLAY(dev)) - i9xx_check_fifo_underruns(dev_priv); + /* Underruns don't always raise interrupts, so check manually. 
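 *
 * [Annotation: the underrun hunks in ironlake_crtc_enable/disable and
 *  haswell_crtc_enable/disable all follow one pattern: PCH FIFO underrun
 *  reporting is switched off before the steps known to cause transient
 *  underruns (pipe enable, FDI link training, transcoder shutdown) and
 *  switched back on only after a vblank has passed, so expected, spurious
 *  underruns stay silent while real ones are still reported.]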
*/ + intel_check_cpu_fifo_underruns(dev_priv); + intel_check_pch_fifo_underruns(dev_priv); } /** @@ -4792,7 +4785,6 @@ static void intel_post_plane_update(struct intel_crtc *crtc) struct intel_crtc_atomic_commit *atomic = &crtc->atomic; struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_plane *plane; if (atomic->wait_vblank) intel_wait_for_vblank(dev, crtc->pipe); @@ -4811,10 +4803,6 @@ static void intel_post_plane_update(struct intel_crtc *crtc) if (atomic->post_enable_primary) intel_post_enable_primary(&crtc->base); - drm_for_each_plane_mask(plane, dev, atomic->update_sprite_watermarks) - intel_update_sprite_watermarks(plane, &crtc->base, - 0, 0, 0, false, false); - memset(atomic, 0, sizeof(*atomic)); } @@ -4823,20 +4811,6 @@ static void intel_pre_plane_update(struct intel_crtc *crtc) struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc_atomic_commit *atomic = &crtc->atomic; - struct drm_plane *p; - - /* Track fb's for any planes being disabled */ - drm_for_each_plane_mask(p, dev, atomic->disabled_planes) { - struct intel_plane *plane = to_intel_plane(p); - - mutex_lock(&dev->struct_mutex); - i915_gem_track_fb(intel_fb_obj(plane->base.fb), NULL, - plane->frontbuffer_bit); - mutex_unlock(&dev->struct_mutex); - } - - if (atomic->wait_for_flips) - intel_crtc_wait_for_pending_flips(&crtc->base); if (atomic->disable_fbc) intel_fbc_disable_crtc(crtc); @@ -4885,6 +4859,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) return; if (intel_crtc->config->has_pch_encoder) + intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); + + if (intel_crtc->config->has_pch_encoder) intel_prepare_shared_dpll(intel_crtc); if (intel_crtc->config->has_dp_encoder) @@ -4902,7 +4879,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) intel_crtc->active = true; intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); - intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); for_each_encoder_on_crtc(dev, crtc, encoder) if (encoder->pre_enable) @@ -4940,6 +4916,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) if (HAS_PCH_CPT(dev)) cpt_verify_modeset(dev, intel_crtc->pipe); + + /* Must wait for vblank to avoid spurious PCH FIFO underruns */ + if (intel_crtc->config->has_pch_encoder) + intel_wait_for_vblank(dev, pipe); + intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); } /* IPS only exists on ULT machines and is tied to pipe A. */ @@ -4962,6 +4943,10 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) if (WARN_ON(intel_crtc->active)) return; + if (intel_crtc->config->has_pch_encoder) + intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, + false); + if (intel_crtc_to_shared_dpll(intel_crtc)) intel_enable_shared_dpll(intel_crtc); @@ -4994,11 +4979,8 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) encoder->pre_enable(encoder); } - if (intel_crtc->config->has_pch_encoder) { - intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, - true); + if (intel_crtc->config->has_pch_encoder) dev_priv->display.fdi_link_train(crtc); - } if (!is_dsi) intel_ddi_enable_pipe_clock(intel_crtc); @@ -5035,6 +5017,10 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) intel_opregion_notify_encoder(encoder, true); } + if (intel_crtc->config->has_pch_encoder) + intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, + true); + /* If we change the relative order between pipe/planes enabling, we need * to change the workaround. 
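The enable paths now mask FIFO underrun reporting across the window where spurious underruns are expected, and re-arm it only after a vblank. The generic shape of that pattern, with every name hypothetical and the helpers stubbed out:

	#include <stdbool.h>

	struct pipe_ctx { int id; };

	static void set_underrun_reporting(struct pipe_ctx *c, bool on) { (void)c; (void)on; }
	static void program_pipe(struct pipe_ctx *c) { (void)c; }	/* the noisy window */
	static void wait_one_vblank(struct pipe_ctx *c) { (void)c; }

	static void enable_with_quiet_window(struct pipe_ctx *c)
	{
		set_underrun_reporting(c, false);	/* expected, harmless underruns ahead */
		program_pipe(c);
		wait_one_vblank(c);			/* give the FIFO a frame to settle */
		set_underrun_reporting(c, true);	/* from here on, underruns are real bugs */
	}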
*/ hsw_workaround_pipe = pipe_config->hsw_workaround_pipe; @@ -5066,7 +5052,9 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *encoder; int pipe = intel_crtc->pipe; - u32 reg, temp; + + if (intel_crtc->config->has_pch_encoder) + intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); for_each_encoder_on_crtc(dev, crtc, encoder) encoder->disable(encoder); @@ -5074,9 +5062,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) drm_crtc_vblank_off(crtc); assert_vblank_disabled(crtc); - if (intel_crtc->config->has_pch_encoder) - intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); - intel_disable_pipe(intel_crtc); ironlake_pfit_disable(intel_crtc, false); @@ -5092,6 +5077,9 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) ironlake_disable_pch_transcoder(dev_priv, pipe); if (HAS_PCH_CPT(dev)) { + i915_reg_t reg; + u32 temp; + /* disable TRANS_DP_CTL */ reg = TRANS_DP_CTL(pipe); temp = I915_READ(reg); @@ -5108,6 +5096,8 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) ironlake_fdi_pll_disable(intel_crtc); } + + intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); } static void haswell_crtc_disable(struct drm_crtc *crtc) @@ -5119,6 +5109,10 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI); + if (intel_crtc->config->has_pch_encoder) + intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, + false); + for_each_encoder_on_crtc(dev, crtc, encoder) { intel_opregion_notify_encoder(encoder, false); encoder->disable(encoder); @@ -5127,9 +5121,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) drm_crtc_vblank_off(crtc); assert_vblank_disabled(crtc); - if (intel_crtc->config->has_pch_encoder) - intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, - false); intel_disable_pipe(intel_crtc); if (intel_crtc->config->dp_encoder_is_mst) @@ -5154,6 +5145,10 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) for_each_encoder_on_crtc(dev, crtc, encoder) if (encoder->post_disable) encoder->post_disable(encoder); + + if (intel_crtc->config->has_pch_encoder) + intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, + true); } static void i9xx_pfit_enable(struct intel_crtc *crtc) @@ -5184,21 +5179,41 @@ static enum intel_display_power_domain port_to_power_domain(enum port port) { switch (port) { case PORT_A: - return POWER_DOMAIN_PORT_DDI_A_4_LANES; + return POWER_DOMAIN_PORT_DDI_A_LANES; case PORT_B: - return POWER_DOMAIN_PORT_DDI_B_4_LANES; + return POWER_DOMAIN_PORT_DDI_B_LANES; case PORT_C: - return POWER_DOMAIN_PORT_DDI_C_4_LANES; + return POWER_DOMAIN_PORT_DDI_C_LANES; case PORT_D: - return POWER_DOMAIN_PORT_DDI_D_4_LANES; + return POWER_DOMAIN_PORT_DDI_D_LANES; case PORT_E: - return POWER_DOMAIN_PORT_DDI_E_2_LANES; + return POWER_DOMAIN_PORT_DDI_E_LANES; default: - WARN_ON_ONCE(1); + MISSING_CASE(port); return POWER_DOMAIN_PORT_OTHER; } } +static enum intel_display_power_domain port_to_aux_power_domain(enum port port) +{ + switch (port) { + case PORT_A: + return POWER_DOMAIN_AUX_A; + case PORT_B: + return POWER_DOMAIN_AUX_B; + case PORT_C: + return POWER_DOMAIN_AUX_C; + case PORT_D: + return POWER_DOMAIN_AUX_D; + case PORT_E: + /* FIXME: Check VBT for actual wiring of PORT E */ + return POWER_DOMAIN_AUX_D; + default: + MISSING_CASE(port); + return POWER_DOMAIN_AUX_A; + } +} + #define 
for_each_power_domain(domain, mask) \ for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ if ((1 << (domain)) & (mask)) @@ -5230,6 +5245,36 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder) } } +enum intel_display_power_domain +intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder) +{ + struct drm_device *dev = intel_encoder->base.dev; + struct intel_digital_port *intel_dig_port; + + switch (intel_encoder->type) { + case INTEL_OUTPUT_UNKNOWN: + case INTEL_OUTPUT_HDMI: + /* + * Only DDI platforms should ever use these output types. + * We can get here after the HDMI detect code has already set + * the type of the shared encoder. Since we can't be sure + * what's the status of the given connectors, play safe and + * run the DP detection too. + */ + WARN_ON_ONCE(!HAS_DDI(dev)); + case INTEL_OUTPUT_DISPLAYPORT: + case INTEL_OUTPUT_EDP: + intel_dig_port = enc_to_dig_port(&intel_encoder->base); + return port_to_aux_power_domain(intel_dig_port->port); + case INTEL_OUTPUT_DP_MST: + intel_dig_port = enc_to_mst(&intel_encoder->base)->primary; + return port_to_aux_power_domain(intel_dig_port->port); + default: + MISSING_CASE(intel_encoder->type); + return POWER_DOMAIN_AUX_A; + } +} + static unsigned long get_crtc_power_domains(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -5237,13 +5282,11 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum pipe pipe = intel_crtc->pipe; unsigned long mask; - enum transcoder transcoder; + enum transcoder transcoder = intel_crtc->config->cpu_transcoder; if (!crtc->state->active) return 0; - transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe); - mask = BIT(POWER_DOMAIN_PIPE(pipe)); mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder)); if (intel_crtc->config->pch_pfit.enabled || @@ -5330,7 +5373,7 @@ static void intel_update_max_cdclk(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - if (IS_SKYLAKE(dev)) { + if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; if (limit == SKL_DFSM_CDCLK_LIMIT_675) @@ -5747,32 +5790,16 @@ void skl_uninit_cdclk(struct drm_i915_private *dev_priv) if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE) DRM_ERROR("DBuf power disable timeout\n"); - /* - * DMC assumes ownership of LCPLL and will get confused if we touch it. 
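for_each_power_domain() above is the familiar for-plus-if idiom for visiting each set bit of a mask. The same shape as a runnable userspace example, with the names invented:

	#include <stdio.h>

	#define for_each_demo_domain(domain, mask)		\
		for ((domain) = 0; (domain) < 32; (domain)++)	\
			if ((1u << (domain)) & (mask))

	int main(void)
	{
		unsigned int domain;
		unsigned int mask = (1u << 3) | (1u << 7);	/* two domains held */

		for_each_demo_domain(domain, mask)
			printf("domain %u\n", domain);		/* prints 3, then 7 */

		return 0;
	}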
- */
-	if (dev_priv->csr.dmc_payload) {
-		/* disable DPLL0 */
-		I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) &
-					~LCPLL_PLL_ENABLE);
-		if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
-			DRM_ERROR("Couldn't disable DPLL0\n");
-	}
-
-	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+	/* disable DPLL0 */
+	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
+	if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
+		DRM_ERROR("Couldn't disable DPLL0\n");
 }
 
 void skl_init_cdclk(struct drm_i915_private *dev_priv)
 {
-	u32 val;
 	unsigned int required_vco;
 
-	/* enable PCH reset handshake */
-	val = I915_READ(HSW_NDE_RSTWRN_OPT);
-	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
-
-	/* enable PG1 and Misc I/O */
-	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
-
 	/* DPLL0 not enabled (happens on early BIOS versions) */
 	if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
 		/* enable DPLL0 */
@@ -5793,6 +5820,45 @@ void skl_init_cdclk(struct drm_i915_private *dev_priv)
 		DRM_ERROR("DBuf power enable timeout\n");
 }
 
+int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
+{
+	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
+	uint32_t cdctl = I915_READ(CDCLK_CTL);
+	int freq = dev_priv->skl_boot_cdclk;
+
+	/*
+	 * Check whether the pre-OS initialized the display.
+	 * There is a SWF18 scratchpad register defined which is set by the
+	 * pre-OS and which OS drivers can use to check that status.
+	 */
+	if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
+		goto sanitize;
+
+	/* Is the PLL enabled and locked? */
+	if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK)))
+		goto sanitize;
+
+	/* DPLL okay; verify the cdclk
+	 *
+	 * Noticed in some instances that the freq selection is correct but
+	 * the decimal part is programmed wrong from the BIOS where the
+	 * pre-OS does not enable the display. Verify that as well.
+	 */
+	if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq)))
+		/* All well; nothing to sanitize */
+		return false;
+sanitize:
+	/*
+	 * For now, initialize with the max cdclk until we get
+	 * dynamic cdclk support.
+	 */
+	dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq;
+	skl_init_cdclk(dev_priv);
+
+	/* we did have to sanitize */
+	return true;
+}
+
 /* Adjust CDclk dividers to allow high res or save power if possible */
 static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
 {
@@ -6257,7 +6323,8 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
 		return;
 
 	if (to_intel_plane_state(crtc->primary->state)->visible) {
-		intel_crtc_wait_for_pending_flips(crtc);
+		WARN_ON(intel_crtc->unpin_work);
+
 		intel_pre_disable_primary(crtc);
 	}
 
@@ -6575,6 +6642,15 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc,
 		pipe_config_supports_ips(dev_priv, pipe_config);
 }
 
+static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
+{
+	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+	/* GDG double wide on either pipe, otherwise pipe A only */
+	return INTEL_INFO(dev_priv)->gen < 4 &&
+		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
+}
+
 static int intel_crtc_compute_config(struct intel_crtc *crtc,
 				     struct intel_crtc_state *pipe_config)
 {
@@ -6584,23 +6660,24 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
 
 	/* FIXME should check pixel clock limits on all platforms */
 	if (INTEL_INFO(dev)->gen < 4) {
-		int clock_limit = dev_priv->max_cdclk_freq;
+		int clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
 
 		/*
-		 * Enable pixel doubling when the dot clock
+		 * Enable double wide mode when the dot clock
 		 * is > 90% of the (display) core speed.
-		 *
-		 * GDG double wide on either pipe,
-		 * otherwise pipe A only.
*/ - if ((crtc->pipe == PIPE_A || IS_I915G(dev)) && - adjusted_mode->crtc_clock > clock_limit * 9 / 10) { + if (intel_crtc_supports_double_wide(crtc) && + adjusted_mode->crtc_clock > clock_limit) { clock_limit *= 2; pipe_config->double_wide = true; } - if (adjusted_mode->crtc_clock > clock_limit * 9 / 10) + if (adjusted_mode->crtc_clock > clock_limit) { + DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", + adjusted_mode->crtc_clock, clock_limit, + yesno(pipe_config->double_wide)); return -EINVAL; + } } /* @@ -7365,7 +7442,7 @@ static void chv_prepare_pll(struct intel_crtc *crtc, struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; int pipe = crtc->pipe; - int dpll_reg = DPLL(crtc->pipe); + i915_reg_t dpll_reg = DPLL(crtc->pipe); enum dpio_channel port = vlv_pipe_to_channel(pipe); u32 loopfilter, tribuf_calcntr; u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; @@ -9283,8 +9360,8 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n"); I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n"); - I915_STATE_WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n"); - I915_STATE_WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n"); + I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n"); + I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n"); I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n"); I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, "CPU PWM1 enabled\n"); @@ -9746,7 +9823,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc, port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; - if (IS_SKYLAKE(dev)) + if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) skylake_get_ddi_pll(dev_priv, port, pipe_config); else if (IS_BROXTON(dev)) bxt_get_ddi_pll(dev_priv, port, pipe_config); @@ -10092,20 +10169,17 @@ __intel_framebuffer_create(struct drm_device *dev, int ret; intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); - if (!intel_fb) { - drm_gem_object_unreference(&obj->base); + if (!intel_fb) return ERR_PTR(-ENOMEM); - } ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); if (ret) goto err; return &intel_fb->base; + err: - drm_gem_object_unreference(&obj->base); kfree(intel_fb); - return ERR_PTR(ret); } @@ -10145,6 +10219,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev, struct drm_display_mode *mode, int depth, int bpp) { + struct drm_framebuffer *fb; struct drm_i915_gem_object *obj; struct drm_mode_fb_cmd2 mode_cmd = { 0 }; @@ -10159,7 +10234,11 @@ intel_framebuffer_create_for_mode(struct drm_device *dev, bpp); mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth); - return intel_framebuffer_create(dev, &mode_cmd, obj); + fb = intel_framebuffer_create(dev, &mode_cmd, obj); + if (IS_ERR(fb)) + drm_gem_object_unreference_unlocked(&obj->base); + + return fb; } static struct drm_framebuffer * @@ -11062,7 +11141,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev, */ if (ring->id == RCS) { intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit(ring, DERRMR); + intel_ring_emit_reg(ring, DERRMR); intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE | DERRMR_PIPEB_PRI_FLIP_DONE | DERRMR_PIPEC_PRI_FLIP_DONE)); @@ -11072,7 +11151,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev, else intel_ring_emit(ring, MI_STORE_REGISTER_MEM | 
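With the change above, clock_limit is pre-scaled to 90% of the core clock and doubled when double wide mode is usable. A worked example of the arithmetic, with the numbers made up purely for illustration:

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		int max_cdclk = 333000;			/* kHz, illustrative */
		int clock_limit = max_cdclk * 9 / 10;	/* 299700 kHz usable */
		int crtc_clock = 320000;		/* requested dot clock, kHz */
		bool double_wide = false;

		if (crtc_clock > clock_limit) {		/* too fast for single wide */
			clock_limit *= 2;		/* 599400 kHz in double wide */
			double_wide = true;
		}

		printf("limit %d kHz, double wide: %s\n",
		       clock_limit, double_wide ? "yes" : "no");

		return crtc_clock > clock_limit;	/* non-zero would mean -EINVAL */
	}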
					 MI_SRM_LRM_GLOBAL_GTT);
-		intel_ring_emit(ring, DERRMR);
+		intel_ring_emit_reg(ring, DERRMR);
 		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
 		if (IS_GEN8(dev)) {
 			intel_ring_emit(ring, 0);
@@ -11117,13 +11196,14 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
 }
 
 static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
+			     unsigned int rotation,
 			     struct intel_unpin_work *work)
 {
 	struct drm_device *dev = intel_crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
 	const enum pipe pipe = intel_crtc->pipe;
-	u32 ctl, stride;
+	u32 ctl, stride, tile_height;
 
 	ctl = I915_READ(PLANE_CTL(pipe, 0));
 	ctl &= ~PLANE_CTL_TILED_MASK;
@@ -11147,9 +11227,16 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
 	 * The stride is either expressed as a multiple of 64-byte chunks for
 	 * linear buffers or in number of tiles for tiled buffers.
 	 */
-	stride = fb->pitches[0] /
-		 intel_fb_stride_alignment(dev, fb->modifier[0],
-					   fb->pixel_format);
+	if (intel_rotation_90_or_270(rotation)) {
+		/* stride = Surface height in tiles */
+		tile_height = intel_tile_height(dev, fb->pixel_format,
+						fb->modifier[0], 0);
+		stride = DIV_ROUND_UP(fb->height, tile_height);
+	} else {
+		stride = fb->pitches[0] /
+			 intel_fb_stride_alignment(dev, fb->modifier[0],
+						   fb->pixel_format);
+	}
 
 	/*
 	 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
@@ -11170,10 +11257,9 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
 	struct intel_framebuffer *intel_fb =
 		to_intel_framebuffer(intel_crtc->base.primary->fb);
 	struct drm_i915_gem_object *obj = intel_fb->obj;
+	i915_reg_t reg = DSPCNTR(intel_crtc->plane);
 	u32 dspcntr;
-	u32 reg;
 
-	reg = DSPCNTR(intel_crtc->plane);
 	dspcntr = I915_READ(reg);
 
 	if (obj->tiling_mode != I915_TILING_NONE)
@@ -11207,7 +11293,7 @@ static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
 	intel_pipe_update_start(crtc);
 
 	if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
-		skl_do_mmio_flip(crtc, work);
+		skl_do_mmio_flip(crtc, mmio_flip->rotation, work);
 	else
 		/* use_mmio_flip() restricts MMIO flips to ilk+ */
 		ilk_do_mmio_flip(crtc, work);
@@ -11234,10 +11320,7 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
 
 static int intel_queue_mmio_flip(struct drm_device *dev,
 				 struct drm_crtc *crtc,
-				 struct drm_framebuffer *fb,
-				 struct drm_i915_gem_object *obj,
-				 struct intel_engine_cs *ring,
-				 uint32_t flags)
+				 struct drm_i915_gem_object *obj)
 {
 	struct intel_mmio_flip *mmio_flip;
 
@@ -11248,6 +11331,7 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
 	mmio_flip->i915 = to_i915(dev);
 	mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
 	mmio_flip->crtc = to_intel_crtc(crtc);
+	mmio_flip->rotation = crtc->primary->state->rotation;
 
 	INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
 	schedule_work(&mmio_flip->work);
@@ -11453,9 +11537,14 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	 * synchronisation, so all we want here is to pin the framebuffer
 	 * into the display plane and skip any waits.
 	 */
+	if (!mmio_flip) {
+		ret = i915_gem_object_sync(obj, ring, &request);
+		if (ret)
+			goto cleanup_pending;
+	}
+
 	ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
-					 crtc->primary->state,
-					 mmio_flip ?
i915_gem_request_get_ring(obj->last_write_req) : ring, &request); + crtc->primary->state); if (ret) goto cleanup_pending; @@ -11464,8 +11553,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, work->gtt_offset += intel_crtc->dspaddr_offset; if (mmio_flip) { - ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring, - page_flip_flags); + ret = intel_queue_mmio_flip(dev, crtc, obj); if (ret) goto cleanup_unpin; @@ -11579,18 +11667,32 @@ retry: static bool intel_wm_need_update(struct drm_plane *plane, struct drm_plane_state *state) { - /* Update watermarks on tiling changes. */ + struct intel_plane_state *new = to_intel_plane_state(state); + struct intel_plane_state *cur = to_intel_plane_state(plane->state); + + /* Update watermarks on tiling or size changes. */ if (!plane->state->fb || !state->fb || plane->state->fb->modifier[0] != state->fb->modifier[0] || - plane->state->rotation != state->rotation) - return true; - - if (plane->state->crtc_w != state->crtc_w) + plane->state->rotation != state->rotation || + drm_rect_width(&new->src) != drm_rect_width(&cur->src) || + drm_rect_height(&new->src) != drm_rect_height(&cur->src) || + drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) || + drm_rect_height(&new->dst) != drm_rect_height(&cur->dst)) return true; return false; } +static bool needs_scaling(struct intel_plane_state *state) +{ + int src_w = drm_rect_width(&state->src) >> 16; + int src_h = drm_rect_height(&state->src) >> 16; + int dst_w = drm_rect_width(&state->dst); + int dst_h = drm_rect_height(&state->dst); + + return (src_w != dst_w || src_h != dst_h); +} + int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, struct drm_plane_state *plane_state) { @@ -11606,7 +11708,6 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, bool mode_changed = needs_modeset(crtc_state); bool was_crtc_enabled = crtc->state->active; bool is_crtc_enabled = crtc_state->active; - bool turn_off, turn_on, visible, was_visible; struct drm_framebuffer *fb = plane_state->fb; @@ -11619,14 +11720,6 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, return ret; } - /* - * Disabling a plane is always okay; we just need to update - * fb tracking in a special way since cleanup_fb() won't - * get called by the plane helpers. - */ - if (old_plane_state->base.fb && !fb) - intel_crtc->atomic.disabled_planes |= 1 << i; - was_visible = old_plane_state->visible; visible = to_intel_plane_state(plane_state)->visible; @@ -11676,7 +11769,6 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, switch (plane->type) { case DRM_PLANE_TYPE_PRIMARY: - intel_crtc->atomic.wait_for_flips = true; intel_crtc->atomic.pre_disable_primary = turn_off; intel_crtc->atomic.post_enable_primary = turn_on; @@ -11724,11 +11816,23 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, case DRM_PLANE_TYPE_CURSOR: break; case DRM_PLANE_TYPE_OVERLAY: - if (turn_off && !mode_changed) { + /* + * WaCxSRDisabledForSpriteScaling:ivb + * + * cstate->update_wm was already set above, so this flag will + * take effect when we commit and program watermarks. 
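needs_scaling() above compares 16.16 fixed-point source dimensions, as carried in drm_rect sources, against integer destination pixels. The comparison in isolation:

	#include <stdbool.h>

	/* src_* are 16.16 fixed point, dst_* are whole pixels; dropping the
	 * fractional 16 bits recovers the integer source size */
	static bool is_scaled(int src_w, int src_h, int dst_w, int dst_h)
	{
		return (src_w >> 16) != dst_w || (src_h >> 16) != dst_h;
	}

	/* e.g. a (1024 << 16) x (768 << 16) source on a 1024x768 plane is
	 * unscaled; the same source on a 512x384 plane is scaled */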
+ */ + if (IS_IVYBRIDGE(dev) && + needs_scaling(to_intel_plane_state(plane_state)) && + !needs_scaling(old_plane_state)) { + to_intel_crtc_state(crtc_state)->disable_lp_wm = true; + } else if (turn_off && !mode_changed) { intel_crtc->atomic.wait_vblank = true; intel_crtc->atomic.update_sprite_watermarks |= 1 << i; } + + break; } return 0; } @@ -11813,6 +11917,12 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, } ret = 0; + if (dev_priv->display.compute_pipe_wm) { + ret = dev_priv->display.compute_pipe_wm(intel_crtc, state); + if (ret) + return ret; + } + if (INTEL_INFO(dev)->gen >= 9) { if (mode_changed) ret = skl_update_scaler_crtc(pipe_config); @@ -12002,7 +12112,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, pipe_config->dpll_hw_state.pll9, pipe_config->dpll_hw_state.pll10, pipe_config->dpll_hw_state.pcsdw12); - } else if (IS_SKYLAKE(dev)) { + } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: " "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n", pipe_config->ddi_pll_sel, @@ -12256,6 +12366,18 @@ intel_modeset_update_crtc_state(struct drm_atomic_state *state) crtc->hwmode = crtc->state->adjusted_mode; else crtc->hwmode.crtc_clock = 0; + + /* + * Update legacy state to satisfy fbc code. This can + * be removed when fbc uses the atomic state. + */ + if (drm_atomic_get_existing_plane_state(state, crtc->primary)) { + struct drm_plane_state *plane_state = crtc->primary->state; + + crtc->primary->fb = plane_state->fb; + crtc->x = plane_state->src_x >> 16; + crtc->y = plane_state->src_y >> 16; + } } } @@ -12281,7 +12403,7 @@ static bool intel_fuzzy_clock_check(int clock1, int clock2) list_for_each_entry((intel_crtc), \ &(dev)->mode_config.crtc_list, \ base.head) \ - if (mask & (1 <<(intel_crtc)->pipe)) + for_each_if (mask & (1 <<(intel_crtc)->pipe)) static bool intel_compare_m_n(unsigned int m, unsigned int n, @@ -12460,7 +12582,6 @@ intel_pipe_config_compare(struct drm_device *dev, if (INTEL_INFO(dev)->gen < 8) { PIPE_CONF_CHECK_M_N(dp_m_n); - PIPE_CONF_CHECK_I(has_drrs); if (current_config->has_drrs) PIPE_CONF_CHECK_M_N(dp_m2_n2); } else @@ -13020,6 +13141,45 @@ static int intel_modeset_checks(struct drm_atomic_state *state) return 0; } +/* + * Handle calculation of various watermark data at the end of the atomic check + * phase. The code here should be run after the per-crtc and per-plane 'check' + * handlers to ensure that all derived state has been updated. + */ +static void calc_watermark_data(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + struct drm_crtc *crtc; + struct drm_crtc_state *cstate; + struct drm_plane *plane; + struct drm_plane_state *pstate; + + /* + * Calculate watermark configuration details now that derived + * plane/crtc state is all properly updated. 
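calc_watermark_data() here leans on the GNU "a ?: b" shorthand: prefer the crtc/plane state queued in this atomic transaction, else fall back to the current state. A tiny runnable illustration of the operator, not the DRM helpers themselves:

	#include <stdio.h>

	static const char *pick(const char *pending, const char *current)
	{
		/* GNU extension: "pending ?: current" evaluates pending once
		 * and is equivalent to pending ? pending : current */
		return pending ?: current;
	}

	int main(void)
	{
		printf("%s\n", pick(NULL, "current state"));		/* current state */
		printf("%s\n", pick("queued state", "current state"));	/* queued state */
		return 0;
	}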
+ */ + drm_for_each_crtc(crtc, dev) { + cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?: + crtc->state; + + if (cstate->active) + intel_state->wm_config.num_pipes_active++; + } + drm_for_each_legacy_plane(plane, dev) { + pstate = drm_atomic_get_existing_plane_state(state, plane) ?: + plane->state; + + if (!to_intel_plane_state(pstate)->visible) + continue; + + intel_state->wm_config.sprites_enabled = true; + if (pstate->crtc_w != pstate->src_w >> 16 || + pstate->crtc_h != pstate->src_h >> 16) + intel_state->wm_config.sprites_scaled = true; + } +} + /** * intel_atomic_check - validate state object * @dev: drm device @@ -13028,6 +13188,7 @@ static int intel_modeset_checks(struct drm_atomic_state *state) static int intel_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) { + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; int ret, i; @@ -13095,10 +13256,81 @@ static int intel_atomic_check(struct drm_device *dev, if (ret) return ret; } else - to_intel_atomic_state(state)->cdclk = - to_i915(state->dev)->cdclk_freq; + intel_state->cdclk = to_i915(state->dev)->cdclk_freq; + + ret = drm_atomic_helper_check_planes(state->dev, state); + if (ret) + return ret; + + calc_watermark_data(state); + + return 0; +} + +static int intel_atomic_prepare_commit(struct drm_device *dev, + struct drm_atomic_state *state, + bool async) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_plane_state *plane_state; + struct drm_crtc_state *crtc_state; + struct drm_plane *plane; + struct drm_crtc *crtc; + int i, ret; + + if (async) { + DRM_DEBUG_KMS("i915 does not yet support async commit\n"); + return -EINVAL; + } - return drm_atomic_helper_check_planes(state->dev, state); + for_each_crtc_in_state(state, crtc, crtc_state, i) { + ret = intel_crtc_wait_for_pending_flips(crtc); + if (ret) + return ret; + + if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2) + flush_workqueue(dev_priv->wq); + } + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + ret = drm_atomic_helper_prepare_planes(dev, state); + if (!ret && !async && !i915_reset_in_progress(&dev_priv->gpu_error)) { + u32 reset_counter; + + reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); + mutex_unlock(&dev->struct_mutex); + + for_each_plane_in_state(state, plane, plane_state, i) { + struct intel_plane_state *intel_plane_state = + to_intel_plane_state(plane_state); + + if (!intel_plane_state->wait_req) + continue; + + ret = __i915_wait_request(intel_plane_state->wait_req, + reset_counter, true, + NULL, NULL); + + /* Swallow -EIO errors to allow updates during hw lockup. 
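The request waits in intel_atomic_prepare_commit() deliberately convert -EIO into success so that a wedged GPU cannot also wedge display updates. The pattern in isolation, with wait_for_request() as a stand-in name:

	#include <errno.h>

	static int wait_for_request(void)
	{
		return -EIO;	/* stub: pretend the GPU is hung */
	}

	static int wait_swallowing_eio(void)
	{
		int ret = wait_for_request();

		if (ret == -EIO)
			ret = 0;	/* GPU hung: flip anyway, reset will clean up */

		return ret;		/* signals and real errors still propagate */
	}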
*/ + if (ret == -EIO) + ret = 0; + + if (ret) + break; + } + + if (!ret) + return 0; + + mutex_lock(&dev->struct_mutex); + drm_atomic_helper_cleanup_planes(dev, state); + } + + mutex_unlock(&dev->struct_mutex); + return ret; } /** @@ -13122,22 +13354,20 @@ static int intel_atomic_commit(struct drm_device *dev, bool async) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; + struct drm_crtc *crtc; int ret = 0; int i; bool any_ms = false; - if (async) { - DRM_DEBUG_KMS("i915 does not yet support async commit\n"); - return -EINVAL; - } - - ret = drm_atomic_helper_prepare_planes(dev, state); - if (ret) + ret = intel_atomic_prepare_commit(dev, state, async); + if (ret) { + DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret); return ret; + } drm_atomic_helper_swap_state(dev, state); + dev_priv->wm.config = to_intel_atomic_state(state)->wm_config; for_each_crtc_in_state(state, crtc, crtc_state, i) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -13175,6 +13405,9 @@ static int intel_atomic_commit(struct drm_device *dev, to_intel_crtc_state(crtc->state)->update_pipe; unsigned long put_domains = 0; + if (modeset) + intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); + if (modeset && crtc->state->active) { update_scanline_offset(to_intel_crtc(crtc)); dev_priv->display.crtc_enable(crtc); @@ -13190,18 +13423,26 @@ static int intel_atomic_commit(struct drm_device *dev, if (!modeset) intel_pre_plane_update(intel_crtc); - drm_atomic_helper_commit_planes_on_crtc(crtc_state); + if (crtc->state->active && + (crtc->state->planes_changed || update_pipe)) + drm_atomic_helper_commit_planes_on_crtc(crtc_state); if (put_domains) modeset_put_power_domains(dev_priv, put_domains); intel_post_plane_update(intel_crtc); + + if (modeset) + intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET); } /* FIXME: add subpixel order */ drm_atomic_helper_wait_for_vblanks(dev, state); + + mutex_lock(&dev->struct_mutex); drm_atomic_helper_cleanup_planes(dev, state); + mutex_unlock(&dev->struct_mutex); if (any_ms) intel_modeset_check_state(dev, state); @@ -13370,6 +13611,8 @@ static void intel_shared_dpll_init(struct drm_device *dev) * bits. Some older platforms need special physical address handling for * cursor planes. * + * Must be called with struct_mutex held. + * * Returns 0 on success, negative error code on failure. */ int @@ -13380,28 +13623,58 @@ intel_prepare_plane_fb(struct drm_plane *plane, struct drm_framebuffer *fb = new_state->fb; struct intel_plane *intel_plane = to_intel_plane(plane); struct drm_i915_gem_object *obj = intel_fb_obj(fb); - struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb); + struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb); int ret = 0; - if (!obj) + if (!obj && !old_obj) return 0; - mutex_lock(&dev->struct_mutex); + if (old_obj) { + struct drm_crtc_state *crtc_state = + drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc); + + /* Big Hammer, we also need to ensure that any pending + * MI_WAIT_FOR_EVENT inside a user batch buffer on the + * current scanout is retired before unpinning the old + * framebuffer. Note that we rely on userspace rendering + * into the buffer attached to the pipe they are waiting + * on. If not, userspace generates a GPU hang with IPEHR + * point to the MI_WAIT_FOR_EVENT. + * + * This should only fail upon a hung GPU, in which case we + * can safely continue. 
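The commit path now brackets each modeset with a get/put on a dedicated power domain, holding the hardware awake only across the reprogramming window. Sketched generically, with all names hypothetical and the helpers stubbed:

	#include <stdbool.h>

	enum demo_domain { DEMO_DOMAIN_MODESET };

	static void power_get(enum demo_domain d) { (void)d; }	/* stub: keep hw powered */
	static void power_put(enum demo_domain d) { (void)d; }	/* stub: balanced release */
	static void reprogram_crtc(void) { }			/* stub: the modeset work */

	static void commit_one_crtc(bool modeset)
	{
		if (modeset)
			power_get(DEMO_DOMAIN_MODESET);	/* hold power across the window */

		reprogram_crtc();

		if (modeset)
			power_put(DEMO_DOMAIN_MODESET);	/* drop it once state is stable */
	}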
+ */ + if (needs_modeset(crtc_state)) + ret = i915_gem_object_wait_rendering(old_obj, true); - if (plane->type == DRM_PLANE_TYPE_CURSOR && + /* Swallow -EIO errors to allow updates during hw lockup. */ + if (ret && ret != -EIO) + return ret; + } + + if (!obj) { + ret = 0; + } else if (plane->type == DRM_PLANE_TYPE_CURSOR && INTEL_INFO(dev)->cursor_needs_physical) { int align = IS_I830(dev) ? 16 * 1024 : 256; ret = i915_gem_object_attach_phys(obj, align); if (ret) DRM_DEBUG_KMS("failed to attach phys object\n"); } else { - ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL, NULL); + ret = intel_pin_and_fence_fb_obj(plane, fb, new_state); } - if (ret == 0) - i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit); + if (ret == 0) { + if (obj) { + struct intel_plane_state *plane_state = + to_intel_plane_state(new_state); - mutex_unlock(&dev->struct_mutex); + i915_gem_request_assign(&plane_state->wait_req, + obj->last_write_req); + } + + i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit); + } return ret; } @@ -13412,23 +13685,35 @@ intel_prepare_plane_fb(struct drm_plane *plane, * @fb: old framebuffer that was on plane * * Cleans up a framebuffer that has just been removed from a plane. + * + * Must be called with struct_mutex held. */ void intel_cleanup_plane_fb(struct drm_plane *plane, const struct drm_plane_state *old_state) { struct drm_device *dev = plane->dev; - struct drm_i915_gem_object *obj = intel_fb_obj(old_state->fb); + struct intel_plane *intel_plane = to_intel_plane(plane); + struct intel_plane_state *old_intel_state; + struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb); + struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb); - if (!obj) + old_intel_state = to_intel_plane_state(old_state); + + if (!obj && !old_obj) return; - if (plane->type != DRM_PLANE_TYPE_CURSOR || - !INTEL_INFO(dev)->cursor_needs_physical) { - mutex_lock(&dev->struct_mutex); + if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR || + !INTEL_INFO(dev)->cursor_needs_physical)) intel_unpin_fb_obj(old_state->fb, old_state); - mutex_unlock(&dev->struct_mutex); - } + + /* prepare_fb aborted? */ + if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) || + (obj && !(obj->frontbuffer_bits & intel_plane->frontbuffer_bit))) + i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit); + + i915_gem_request_assign(&old_intel_state->wait_req, NULL); + } int @@ -13447,7 +13732,7 @@ skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state crtc_clock = crtc_state->base.adjusted_mode.crtc_clock; cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk; - if (!crtc_clock || !cdclk) + if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock)) return DRM_PLANE_HELPER_NO_SCALING; /* @@ -13495,18 +13780,8 @@ intel_commit_primary_plane(struct drm_plane *plane, struct drm_framebuffer *fb = state->base.fb; struct drm_device *dev = plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc; - struct drm_rect *src = &state->src; crtc = crtc ? 
crtc : plane->crtc; - intel_crtc = to_intel_crtc(crtc); - - plane->fb = fb; - crtc->x = src->x1 >> 16; - crtc->y = src->y1 >> 16; - - if (!crtc->state->active) - return; dev_priv->display.update_primary_plane(crtc, fb, state->src.x1 >> 16, @@ -13536,8 +13811,7 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc, intel_update_watermarks(crtc); /* Perform vblank evasion around commit operation */ - if (crtc->state->active) - intel_pipe_update_start(intel_crtc); + intel_pipe_update_start(intel_crtc); if (modeset) return; @@ -13553,8 +13827,7 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc, { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - if (crtc->state->active) - intel_pipe_update_end(intel_crtc); + intel_pipe_update_end(intel_crtc); } /** @@ -13737,8 +14010,7 @@ intel_commit_cursor_plane(struct drm_plane *plane, intel_crtc->cursor_bo = obj; update: - if (crtc->state->active) - intel_crtc_update_cursor(crtc, state->visible); + intel_crtc_update_cursor(crtc, state->visible); } static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, @@ -14010,7 +14282,7 @@ static void intel_setup_outputs(struct drm_device *dev) */ found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; /* WaIgnoreDDIAStrap: skl */ - if (found || IS_SKYLAKE(dev)) + if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) intel_ddi_init(dev, PORT_A); /* DDI B, C and D detection is indicated by the SFUSE_STRAP @@ -14026,7 +14298,7 @@ static void intel_setup_outputs(struct drm_device *dev) /* * On SKL we don't have a way to detect DDI-E so we rely on VBT. */ - if (IS_SKYLAKE(dev) && + if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp || dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi || dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi)) @@ -14041,7 +14313,7 @@ static void intel_setup_outputs(struct drm_device *dev) if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) { /* PCH SDVOB multiplex with HDMIB */ - found = intel_sdvo_init(dev, PCH_SDVOB, true); + found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B); if (!found) intel_hdmi_init(dev, PCH_HDMIB, PORT_B); if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) @@ -14097,7 +14369,7 @@ static void intel_setup_outputs(struct drm_device *dev) if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { DRM_DEBUG_KMS("probing SDVOB\n"); - found = intel_sdvo_init(dev, GEN3_SDVOB, true); + found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B); if (!found && IS_G4X(dev)) { DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); intel_hdmi_init(dev, GEN4_HDMIB, PORT_B); @@ -14111,7 +14383,7 @@ static void intel_setup_outputs(struct drm_device *dev) if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { DRM_DEBUG_KMS("probing SDVOC\n"); - found = intel_sdvo_init(dev, GEN3_SDVOC, false); + found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C); } if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) { @@ -14377,8 +14649,9 @@ static int intel_framebuffer_init(struct drm_device *dev, static struct drm_framebuffer * intel_user_framebuffer_create(struct drm_device *dev, struct drm_file *filp, - struct drm_mode_fb_cmd2 *user_mode_cmd) + const struct drm_mode_fb_cmd2 *user_mode_cmd) { + struct drm_framebuffer *fb; struct drm_i915_gem_object *obj; struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd; @@ -14387,7 +14660,11 @@ intel_user_framebuffer_create(struct drm_device *dev, if (&obj->base == NULL) return ERR_PTR(-ENOENT); - return intel_framebuffer_create(dev, &mode_cmd, obj); + fb = intel_framebuffer_create(dev, &mode_cmd, obj); + if (IS_ERR(fb)) + 
drm_gem_object_unreference_unlocked(&obj->base); + + return fb; } #ifndef CONFIG_DRM_FBDEV_EMULATION @@ -14472,7 +14749,7 @@ static void intel_init_display(struct drm_device *dev) } /* Returns the core display clock speed */ - if (IS_SKYLAKE(dev)) + if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) dev_priv->display.get_display_clock_speed = skylake_get_display_clock_speed; else if (IS_BROXTON(dev)) @@ -14761,7 +15038,7 @@ static void i915_disable_vga(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u8 sr1; - u32 vga_reg = i915_vgacntrl_reg(dev); + i915_reg_t vga_reg = i915_vgacntrl_reg(dev); /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */ vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); @@ -14877,9 +15154,6 @@ void intel_modeset_init(struct drm_device *dev) i915_disable_vga(dev); intel_setup_outputs(dev); - /* Just in case the BIOS is doing something questionable. */ - intel_fbc_disable(dev_priv); - drm_modeset_lock_all(dev); intel_modeset_setup_hw_state(dev); drm_modeset_unlock_all(dev); @@ -14966,10 +15240,9 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 reg; + i915_reg_t reg = PIPECONF(crtc->config->cpu_transcoder); /* Clear any frame start delays used for debugging left by the BIOS */ - reg = PIPECONF(crtc->config->cpu_transcoder); I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); /* restore vblank interrupts to correct state */ @@ -15123,7 +15396,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) void i915_redisable_vga_power_on(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - u32 vga_reg = i915_vgacntrl_reg(dev); + i915_reg_t vga_reg = i915_vgacntrl_reg(dev); if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); @@ -15162,7 +15435,7 @@ static void readout_plane_state(struct intel_crtc *crtc) struct intel_plane_state *plane_state = to_intel_plane_state(primary->state); - plane_state->visible = + plane_state->visible = crtc->active && primary_get_hw_state(to_intel_plane(primary)); if (plane_state->visible) @@ -15419,8 +15692,7 @@ void intel_modeset_gem_init(struct drm_device *dev) mutex_lock(&dev->struct_mutex); ret = intel_pin_and_fence_fb_obj(c->primary, c->primary->fb, - c->primary->state, - NULL, NULL); + c->primary->state); mutex_unlock(&dev->struct_mutex); if (ret) { DRM_ERROR("failed to pin boot fb on pipe %d\n", diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 09bdd94ca3ba..e1456ead5c53 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -277,7 +277,7 @@ static void pps_lock(struct intel_dp *intel_dp) * See vlv_power_sequencer_reset() why we need * a power domain reference here. 
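The framebuffer-creation changes above settle a reference-ownership rule: on success the framebuffer keeps the GEM object reference, on failure the caller still owns it and must drop it. A sketch of that rule with stand-in types and functions:

	#include <linux/err.h>

	struct demo_fb;
	struct demo_obj;

	struct demo_fb *demo_fb_create(struct demo_obj *obj);	/* takes the ref on success */
	void demo_obj_unreference(struct demo_obj *obj);

	struct demo_fb *create_fb_for_object(struct demo_obj *obj)
	{
		struct demo_fb *fb = demo_fb_create(obj);

		if (IS_ERR(fb))
			demo_obj_unreference(obj);	/* failed: the reference is still ours */

		return fb;	/* on success the fb owns the reference from here on */
	}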
*/ - power_domain = intel_display_port_power_domain(encoder); + power_domain = intel_display_port_aux_power_domain(encoder); intel_display_power_get(dev_priv, power_domain); mutex_lock(&dev_priv->pps_mutex); @@ -293,7 +293,7 @@ static void pps_unlock(struct intel_dp *intel_dp) mutex_unlock(&dev_priv->pps_mutex); - power_domain = intel_display_port_power_domain(encoder); + power_domain = intel_display_port_aux_power_domain(encoder); intel_display_power_put(dev_priv, power_domain); } @@ -541,7 +541,8 @@ void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv) } } -static u32 _pp_ctrl_reg(struct intel_dp *intel_dp) +static i915_reg_t +_pp_ctrl_reg(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); @@ -553,7 +554,8 @@ static u32 _pp_ctrl_reg(struct intel_dp *intel_dp) return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp)); } -static u32 _pp_stat_reg(struct intel_dp *intel_dp) +static i915_reg_t +_pp_stat_reg(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); @@ -582,7 +584,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code, if (IS_VALLEYVIEW(dev)) { enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); - u32 pp_ctrl_reg, pp_div_reg; + i915_reg_t pp_ctrl_reg, pp_div_reg; u32 pp_div; pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe); @@ -652,7 +654,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg; + i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg; uint32_t status; bool done; @@ -750,7 +752,7 @@ static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp, else precharge = 5; - if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL) + if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A) timeout = DP_AUX_CH_CTL_TIME_OUT_600us; else timeout = DP_AUX_CH_CTL_TIME_OUT_400us; @@ -789,8 +791,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg; - uint32_t ch_data = ch_ctl + 4; + i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg; uint32_t aux_clock_divider; int i, ret, recv_bytes; uint32_t status; @@ -816,8 +817,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, intel_dp_check_edp(intel_dp); - intel_aux_display_runtime_get(dev_priv); - /* Try to wait for any previous AUX channel activity */ for (try = 0; try < 3; try++) { status = I915_READ_NOTRACE(ch_ctl); @@ -856,7 +855,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, for (try = 0; try < 5; try++) { /* Load the send data into the aux channel data registers */ for (i = 0; i < send_bytes; i += 4) - I915_WRITE(ch_data + i, + I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2], intel_dp_pack_aux(send + i, send_bytes - i)); @@ -920,13 +919,12 @@ done: recv_bytes = recv_size; for (i = 0; i < recv_bytes; i += 4) - intel_dp_unpack_aux(I915_READ(ch_data + i), + intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]), recv + i, recv_bytes - i); ret = recv_bytes; out: pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); - intel_aux_display_runtime_put(dev_priv); if (vdd) edp_panel_vdd_off(intel_dp, false); @@ -1008,96 +1006,206 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct 
drm_dp_aux_msg *msg) return ret; } -static void -intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector) +static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv, + enum port port) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - enum port port = intel_dig_port->port; - struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port]; - const char *name = NULL; - uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL; - int ret; + switch (port) { + case PORT_B: + case PORT_C: + case PORT_D: + return DP_AUX_CH_CTL(port); + default: + MISSING_CASE(port); + return DP_AUX_CH_CTL(PORT_B); + } +} - /* On SKL we don't have Aux for port E so we rely on VBT to set - * a proper alternate aux channel. - */ - if (IS_SKYLAKE(dev) && port == PORT_E) { - switch (info->alternate_aux_channel) { - case DP_AUX_B: - porte_aux_ctl_reg = DPB_AUX_CH_CTL; - break; - case DP_AUX_C: - porte_aux_ctl_reg = DPC_AUX_CH_CTL; - break; - case DP_AUX_D: - porte_aux_ctl_reg = DPD_AUX_CH_CTL; - break; - case DP_AUX_A: - default: - porte_aux_ctl_reg = DPA_AUX_CH_CTL; - } +static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv, + enum port port, int index) +{ + switch (port) { + case PORT_B: + case PORT_C: + case PORT_D: + return DP_AUX_CH_DATA(port, index); + default: + MISSING_CASE(port); + return DP_AUX_CH_DATA(PORT_B, index); } +} +static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv, + enum port port) +{ switch (port) { case PORT_A: - intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL; - name = "DPDDC-A"; - break; + return DP_AUX_CH_CTL(port); case PORT_B: - intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL; - name = "DPDDC-B"; - break; case PORT_C: - intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL; - name = "DPDDC-C"; - break; case PORT_D: - intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL; - name = "DPDDC-D"; - break; - case PORT_E: - intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg; - name = "DPDDC-E"; - break; + return PCH_DP_AUX_CH_CTL(port); default: - BUG(); + MISSING_CASE(port); + return DP_AUX_CH_CTL(PORT_A); } +} - /* - * The AUX_CTL register is usually DP_CTL + 0x10. - * - * On Haswell and Broadwell though: - * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU - * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU - * - * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU. - */ - if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E) - intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10; +static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv, + enum port port, int index) +{ + switch (port) { + case PORT_A: + return DP_AUX_CH_DATA(port, index); + case PORT_B: + case PORT_C: + case PORT_D: + return PCH_DP_AUX_CH_DATA(port, index); + default: + MISSING_CASE(port); + return DP_AUX_CH_DATA(PORT_A, index); + } +} + +/* + * On SKL we don't have Aux for port E so we rely + * on VBT to set a proper alternate aux channel. 
+ */ +static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv) +{ + const struct ddi_vbt_port_info *info = + &dev_priv->vbt.ddi_port_info[PORT_E]; + + switch (info->alternate_aux_channel) { + case DP_AUX_A: + return PORT_A; + case DP_AUX_B: + return PORT_B; + case DP_AUX_C: + return PORT_C; + case DP_AUX_D: + return PORT_D; + default: + MISSING_CASE(info->alternate_aux_channel); + return PORT_A; + } +} + +static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv, + enum port port) +{ + if (port == PORT_E) + port = skl_porte_aux_port(dev_priv); + + switch (port) { + case PORT_A: + case PORT_B: + case PORT_C: + case PORT_D: + return DP_AUX_CH_CTL(port); + default: + MISSING_CASE(port); + return DP_AUX_CH_CTL(PORT_A); + } +} + +static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv, + enum port port, int index) +{ + if (port == PORT_E) + port = skl_porte_aux_port(dev_priv); + + switch (port) { + case PORT_A: + case PORT_B: + case PORT_C: + case PORT_D: + return DP_AUX_CH_DATA(port, index); + default: + MISSING_CASE(port); + return DP_AUX_CH_DATA(PORT_A, index); + } +} + +static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv, + enum port port) +{ + if (INTEL_INFO(dev_priv)->gen >= 9) + return skl_aux_ctl_reg(dev_priv, port); + else if (HAS_PCH_SPLIT(dev_priv)) + return ilk_aux_ctl_reg(dev_priv, port); + else + return g4x_aux_ctl_reg(dev_priv, port); +} + +static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv, + enum port port, int index) +{ + if (INTEL_INFO(dev_priv)->gen >= 9) + return skl_aux_data_reg(dev_priv, port, index); + else if (HAS_PCH_SPLIT(dev_priv)) + return ilk_aux_data_reg(dev_priv, port, index); + else + return g4x_aux_data_reg(dev_priv, port, index); +} + +static void intel_aux_reg_init(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + enum port port = dp_to_dig_port(intel_dp)->port; + int i; + + intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port); + for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++) + intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i); +} + +static void +intel_dp_aux_fini(struct intel_dp *intel_dp) +{ + drm_dp_aux_unregister(&intel_dp->aux); + kfree(intel_dp->aux.name); +} + +static int +intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + enum port port = intel_dig_port->port; + int ret; + + intel_aux_reg_init(intel_dp); + + intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port)); + if (!intel_dp->aux.name) + return -ENOMEM; - intel_dp->aux.name = name; intel_dp->aux.dev = dev->dev; intel_dp->aux.transfer = intel_dp_aux_transfer; - DRM_DEBUG_KMS("registering %s bus for %s\n", name, + DRM_DEBUG_KMS("registering %s bus for %s\n", + intel_dp->aux.name, connector->base.kdev->kobj.name); ret = drm_dp_aux_register(&intel_dp->aux); if (ret < 0) { DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n", - name, ret); - return; + intel_dp->aux.name, ret); + kfree(intel_dp->aux.name); + return ret; } ret = sysfs_create_link(&connector->base.kdev->kobj, &intel_dp->aux.ddc.dev.kobj, intel_dp->aux.ddc.dev.kobj.name); if (ret < 0) { - DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret); - drm_dp_aux_unregister(&intel_dp->aux); + DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", + intel_dp->aux.name, ret); + 
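intel_dp_aux_init() now allocates the DDC name with kasprintf() instead of pointing at a string literal, so each failure path after the allocation must free it exactly once, with intel_dp_aux_fini() covering teardown. In miniature, with register_channel() standing in for the aux/sysfs registration steps:

	#include <linux/errno.h>
	#include <linux/kernel.h>
	#include <linux/slab.h>

	static int register_channel(const char *name)
	{
		return name ? 0 : -EINVAL;	/* stub for the registration steps */
	}

	static int demo_aux_init(char port, char **out_name)
	{
		char *name = kasprintf(GFP_KERNEL, "DPDDC-%c", port);
		int ret;

		if (!name)
			return -ENOMEM;

		ret = register_channel(name);
		if (ret < 0) {
			kfree(name);		/* every failure path frees exactly once */
			return ret;
		}

		*out_name = name;		/* success: teardown frees it later */
		return 0;
	}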
intel_dp_aux_fini(intel_dp); + return ret; } + + return 0; } static void @@ -1189,10 +1297,13 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates) return (intel_dp_max_link_bw(intel_dp) >> 3) + 1; } -static bool intel_dp_source_supports_hbr2(struct drm_device *dev) +bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp) { + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = dig_port->base.base.dev; + /* WaDisableHBR2:skl */ - if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) + if (IS_SKL_REVID(dev, 0, SKL_REVID_B0)) return false; if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) || @@ -1203,14 +1314,16 @@ static bool intel_dp_source_supports_hbr2(struct drm_device *dev) } static int -intel_dp_source_rates(struct drm_device *dev, const int **source_rates) +intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates) { + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = dig_port->base.base.dev; int size; if (IS_BROXTON(dev)) { *source_rates = bxt_rates; size = ARRAY_SIZE(bxt_rates); - } else if (IS_SKYLAKE(dev)) { + } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { *source_rates = skl_rates; size = ARRAY_SIZE(skl_rates); } else { @@ -1219,7 +1332,7 @@ intel_dp_source_rates(struct drm_device *dev, const int **source_rates) } /* This depends on the fact that 5.4 is last value in the array */ - if (!intel_dp_source_supports_hbr2(dev)) + if (!intel_dp_source_supports_hbr2(intel_dp)) size--; return size; @@ -1284,12 +1397,11 @@ static int intersect_rates(const int *source_rates, int source_len, static int intel_dp_common_rates(struct intel_dp *intel_dp, int *common_rates) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); const int *source_rates, *sink_rates; int source_len, sink_len; sink_len = intel_dp_sink_rates(intel_dp, &sink_rates); - source_len = intel_dp_source_rates(dev, &source_rates); + source_len = intel_dp_source_rates(intel_dp, &source_rates); return intersect_rates(source_rates, source_len, sink_rates, sink_len, @@ -1314,7 +1426,6 @@ static void snprintf_int_array(char *str, size_t len, static void intel_dp_print_rates(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); const int *source_rates, *sink_rates; int source_len, sink_len, common_len; int common_rates[DP_MAX_SUPPORTED_RATES]; @@ -1323,7 +1434,7 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp) if ((drm_debug & DRM_UT_KMS) == 0) return; - source_len = intel_dp_source_rates(dev, &source_rates); + source_len = intel_dp_source_rates(intel_dp, &source_rates); snprintf_int_array(str, sizeof(str), source_rates, source_len); DRM_DEBUG_KMS("source rates: %s\n", str); @@ -1365,8 +1476,8 @@ int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) return rate_to_index(rate, intel_dp->sink_rates); } -static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, - uint8_t *link_bw, uint8_t *rate_select) +void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, + uint8_t *link_bw, uint8_t *rate_select) { if (intel_dp->num_sink_rates) { *link_bw = 0; @@ -1426,7 +1537,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, return ret; } - if (!HAS_PCH_SPLIT(dev)) + if (HAS_GMCH_DISPLAY(dev)) intel_gmch_panel_fitting(intel_crtc, pipe_config, intel_connector->panel.fitting_mode); else @@ -1530,7 +1641,7 @@ found: &pipe_config->dp_m2_n2); } - if (IS_SKYLAKE(dev) && is_edp(intel_dp)) + if ((IS_SKYLAKE(dev) || 
IS_KABYLAKE(dev)) && is_edp(intel_dp)) skl_edp_set_pll_config(pipe_config); else if (IS_BROXTON(dev)) /* handled in ddi */; @@ -1542,37 +1653,6 @@ found: return true; } -static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp) -{ - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - u32 dpa_ctl; - - DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", - crtc->config->port_clock); - dpa_ctl = I915_READ(DP_A); - dpa_ctl &= ~DP_PLL_FREQ_MASK; - - if (crtc->config->port_clock == 162000) { - /* For a long time we've carried around a ILK-DevA w/a for the - * 160MHz clock. If we're really unlucky, it's still required. - */ - DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n"); - dpa_ctl |= DP_PLL_FREQ_160MHZ; - intel_dp->DP |= DP_PLL_FREQ_160MHZ; - } else { - dpa_ctl |= DP_PLL_FREQ_270MHZ; - intel_dp->DP |= DP_PLL_FREQ_270MHZ; - } - - I915_WRITE(DP_A, dpa_ctl); - - POSTING_READ(DP_A); - udelay(500); -} - void intel_dp_set_link_params(struct intel_dp *intel_dp, const struct intel_crtc_state *pipe_config) { @@ -1617,9 +1697,6 @@ static void intel_dp_prepare(struct intel_encoder *encoder) intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count); - if (crtc->config->has_audio) - intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; - /* Split out the IBX/CPU vs CPT settings */ if (IS_GEN7(dev) && port == PORT_A) { @@ -1680,7 +1757,7 @@ static void wait_panel_status(struct intel_dp *intel_dp, { struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_i915_private *dev_priv = dev->dev_private; - u32 pp_stat_reg, pp_ctrl_reg; + i915_reg_t pp_stat_reg, pp_ctrl_reg; lockdep_assert_held(&dev_priv->pps_mutex); @@ -1770,7 +1847,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dev->dev_private; enum intel_display_power_domain power_domain; u32 pp; - u32 pp_stat_reg, pp_ctrl_reg; + i915_reg_t pp_stat_reg, pp_ctrl_reg; bool need_to_disable = !intel_dp->want_panel_vdd; lockdep_assert_held(&dev_priv->pps_mutex); @@ -1784,7 +1861,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp) if (edp_have_panel_vdd(intel_dp)) return need_to_disable; - power_domain = intel_display_port_power_domain(intel_encoder); + power_domain = intel_display_port_aux_power_domain(intel_encoder); intel_display_power_get(dev_priv, power_domain); DRM_DEBUG_KMS("Turning eDP port %c VDD on\n", @@ -1846,7 +1923,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) struct intel_encoder *intel_encoder = &intel_dig_port->base; enum intel_display_power_domain power_domain; u32 pp; - u32 pp_stat_reg, pp_ctrl_reg; + i915_reg_t pp_stat_reg, pp_ctrl_reg; lockdep_assert_held(&dev_priv->pps_mutex); @@ -1874,7 +1951,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) if ((pp & POWER_TARGET_ON) == 0) intel_dp->last_power_cycle = jiffies; - power_domain = intel_display_port_power_domain(intel_encoder); + power_domain = intel_display_port_aux_power_domain(intel_encoder); intel_display_power_put(dev_priv, power_domain); } @@ -1933,7 +2010,7 @@ static void edp_panel_on(struct intel_dp *intel_dp) struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_i915_private *dev_priv = dev->dev_private; u32 pp; - u32 pp_ctrl_reg; + i915_reg_t pp_ctrl_reg; lockdep_assert_held(&dev_priv->pps_mutex); @@ -1995,7 +2072,7 @@ static void 
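The new assert_dp_port()/assert_edp_pll() helpers compare expected against current hardware state and print readable on/off strings. The same shape as a standalone program:

	#include <stdbool.h>
	#include <stdio.h>

	static const char *state_string(bool enabled)
	{
		return enabled ? "on" : "off";
	}

	static void assert_state(const char *what, bool cur, bool expected)
	{
		if (cur != expected)
			fprintf(stderr,
				"%s state assertion failure (expected %s, current %s)\n",
				what, state_string(expected), state_string(cur));
	}

	int main(void)
	{
		assert_state("eDP PLL", true, false);	/* triggers the warning */
		return 0;
	}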
edp_panel_off(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dev->dev_private; enum intel_display_power_domain power_domain; u32 pp; - u32 pp_ctrl_reg; + i915_reg_t pp_ctrl_reg; lockdep_assert_held(&dev_priv->pps_mutex); @@ -2025,7 +2102,7 @@ static void edp_panel_off(struct intel_dp *intel_dp) wait_panel_off(intel_dp); /* We got a reference when we enabled the VDD. */ - power_domain = intel_display_port_power_domain(intel_encoder); + power_domain = intel_display_port_aux_power_domain(intel_encoder); intel_display_power_put(dev_priv, power_domain); } @@ -2046,7 +2123,7 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp) struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 pp; - u32 pp_ctrl_reg; + i915_reg_t pp_ctrl_reg; /* * If we enable the backlight right away following a panel power @@ -2087,7 +2164,7 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp) struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_i915_private *dev_priv = dev->dev_private; u32 pp; - u32 pp_ctrl_reg; + i915_reg_t pp_ctrl_reg; if (!is_edp(intel_dp)) return; @@ -2146,27 +2223,61 @@ static void intel_edp_backlight_power(struct intel_connector *connector, _intel_edp_backlight_off(intel_dp); } +static const char *state_string(bool enabled) +{ + return enabled ? "on" : "off"; +} + +static void assert_dp_port(struct intel_dp *intel_dp, bool state) +{ + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN; + + I915_STATE_WARN(cur_state != state, + "DP port %c state assertion failure (expected %s, current %s)\n", + port_name(dig_port->port), + state_string(state), state_string(cur_state)); +} +#define assert_dp_port_disabled(d) assert_dp_port((d), false) + +static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state) +{ + bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE; + + I915_STATE_WARN(cur_state != state, + "eDP PLL state assertion failure (expected %s, current %s)\n", + state_string(state), state_string(cur_state)); +} +#define assert_edp_pll_enabled(d) assert_edp_pll((d), true) +#define assert_edp_pll_disabled(d) assert_edp_pll((d), false) + static void ironlake_edp_pll_on(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_crtc *crtc = intel_dig_port->base.base.crtc; - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - u32 dpa_ctl; + struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - assert_pipe_disabled(dev_priv, - to_intel_crtc(crtc)->pipe); + assert_pipe_disabled(dev_priv, crtc->pipe); + assert_dp_port_disabled(intel_dp); + assert_edp_pll_disabled(dev_priv); + + DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n", + crtc->config->port_clock); + + intel_dp->DP &= ~DP_PLL_FREQ_MASK; + + if (crtc->config->port_clock == 162000) + intel_dp->DP |= DP_PLL_FREQ_162MHZ; + else + intel_dp->DP |= DP_PLL_FREQ_270MHZ; + + I915_WRITE(DP_A, intel_dp->DP); + POSTING_READ(DP_A); + udelay(500); - DRM_DEBUG_KMS("\n"); - dpa_ctl = I915_READ(DP_A); - WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n"); - WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n"); - - /* We don't adjust intel_dp->DP while tearing down the link, to - * facilitate link retraining (e.g. 
after hotplug). Hence clear all - * enable bits here to ensure that we don't enable too much. */ - intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE); intel_dp->DP |= DP_PLL_ENABLE; + I915_WRITE(DP_A, intel_dp->DP); POSTING_READ(DP_A); udelay(200); @@ -2175,24 +2286,18 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp) static void ironlake_edp_pll_off(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_crtc *crtc = intel_dig_port->base.base.crtc; - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - u32 dpa_ctl; + struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - assert_pipe_disabled(dev_priv, - to_intel_crtc(crtc)->pipe); + assert_pipe_disabled(dev_priv, crtc->pipe); + assert_dp_port_disabled(intel_dp); + assert_edp_pll_enabled(dev_priv); - dpa_ctl = I915_READ(DP_A); - WARN((dpa_ctl & DP_PLL_ENABLE) == 0, - "dp pll off, should be on\n"); - WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n"); + DRM_DEBUG_KMS("disabling eDP PLL\n"); - /* We can't rely on the value tracked for the DP register in - * intel_dp->DP because link_down must not change that (otherwise link - * re-training will fail. */ - dpa_ctl &= ~DP_PLL_ENABLE; - I915_WRITE(DP_A, dpa_ctl); + intel_dp->DP &= ~DP_PLL_ENABLE; + + I915_WRITE(DP_A, intel_dp->DP); POSTING_READ(DP_A); udelay(200); } @@ -2261,7 +2366,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder, } DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", - intel_dp->output_reg); + i915_mmio_reg_offset(intel_dp->output_reg)); } else if (IS_CHERRYVIEW(dev)) { *pipe = DP_PORT_TO_PIPE_CHV(tmp); } else { @@ -2324,7 +2429,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder, intel_dp_get_m_n(crtc, pipe_config); if (port == PORT_A) { - if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ) + if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ) pipe_config->port_clock = 162000; else pipe_config->port_clock = 270000; @@ -2389,6 +2494,8 @@ static void ilk_post_disable_dp(struct intel_encoder *encoder) enum port port = dp_to_dig_port(intel_dp)->port; intel_dp_link_down(intel_dp); + + /* Only ilk+ has port A */ if (port == PORT_A) ironlake_edp_pll_off(intel_dp); } @@ -2548,6 +2655,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *crtc = + to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc); /* enable with pattern 1 (as per spec) */ _intel_dp_set_link_train(intel_dp, &intel_dp->DP, @@ -2563,6 +2672,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp) * fail when the power sequencer is freshly used for this port. 
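/*
 * For context: assert_dp_port() and assert_edp_pll() above follow the driver's
 * standard state-assertion idiom: read the hardware, compare it with the
 * expected software state, and warn on any mismatch. A minimal sketch of the
 * same idiom for a hypothetical enable bit (MY_REG and MY_ENABLE_BIT are
 * illustrative names, not real i915 symbols):
 */
static void assert_my_feature(struct drm_i915_private *dev_priv, bool state)
{
	/* read back the current hardware state of the hypothetical enable bit */
	bool cur_state = I915_READ(MY_REG) & MY_ENABLE_BIT;

	I915_STATE_WARN(cur_state != state,
			"my feature state assertion failure (expected %s, current %s)\n",
			state ? "on" : "off", cur_state ? "on" : "off");
}
#define assert_my_feature_enabled(d) assert_my_feature((d), true)
#define assert_my_feature_disabled(d) assert_my_feature((d), false)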
*/ intel_dp->DP |= DP_PORT_EN; + if (crtc->config->has_audio) + intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; I915_WRITE(intel_dp->output_reg, intel_dp->DP); POSTING_READ(intel_dp->output_reg); @@ -2575,6 +2686,8 @@ static void intel_enable_dp(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); uint32_t dp_reg = I915_READ(intel_dp->output_reg); + enum port port = dp_to_dig_port(intel_dp)->port; + enum pipe pipe = crtc->pipe; if (WARN_ON(dp_reg & DP_PORT_EN)) return; @@ -2586,6 +2699,17 @@ static void intel_enable_dp(struct intel_encoder *encoder) intel_dp_enable_port(intel_dp); + if (port == PORT_A && IS_GEN5(dev_priv)) { + /* + * Underrun reporting for the other pipe was disabled in + * g4x_pre_enable_dp(). The eDP PLL and port have now been + * enabled, so it's now safe to re-enable underrun reporting. + */ + intel_wait_for_vblank_if_active(dev_priv->dev, !pipe); + intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true); + intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true); + } + edp_panel_vdd_on(intel_dp); edp_panel_on(intel_dp); edp_panel_vdd_off(intel_dp, true); @@ -2608,7 +2732,7 @@ static void intel_enable_dp(struct intel_encoder *encoder) if (crtc->config->has_audio) { DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n", - pipe_name(crtc->pipe)); + pipe_name(pipe)); intel_audio_codec_enable(encoder); } } @@ -2631,16 +2755,29 @@ static void vlv_enable_dp(struct intel_encoder *encoder) static void g4x_pre_enable_dp(struct intel_encoder *encoder) { + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - struct intel_digital_port *dport = dp_to_dig_port(intel_dp); + enum port port = dp_to_dig_port(intel_dp)->port; + enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe; intel_dp_prepare(encoder); + if (port == PORT_A && IS_GEN5(dev_priv)) { + /* + * We get FIFO underruns on the other pipe when + * enabling the CPU eDP PLL, and when enabling CPU + * eDP port. We could potentially avoid the PLL + * underrun with a vblank wait just prior to enabling + * the PLL, but that doesn't appear to help the port + * enable case. Just sweep it all under the rug. + */ + intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false); + intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false); + } + /* Only ilk+ has port A */ - if (dport->port == PORT_A) { - ironlake_set_pll_cpu_edp(intel_dp); + if (port == PORT_A) ironlake_edp_pll_on(intel_dp); - } } static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) @@ -2648,7 +2785,7 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private; enum pipe pipe = intel_dp->pps_pipe; - int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe); + i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe); edp_panel_vdd_off_sync(intel_dp); @@ -3046,7 +3183,7 @@ intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset, * Fetch AUX CH registers 0x202 - 0x207 which contain * link status information */ -static bool +bool intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) { return intel_dp_dpcd_read_wake(&intel_dp->aux, @@ -3056,7 +3193,7 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ } /* These are source-specific values. 
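/*
 * For context: the IS_GEN5() port A handling in intel_enable_dp() and
 * g4x_pre_enable_dp() above is a suppress/restore pattern around an operation
 * known to trigger spurious FIFO underruns on the other pipe. Condensed into
 * one sketch (the wrapper name is hypothetical; the calls are the ones this
 * patch adds, and !pipe picks the other of the two pipes):
 */
static void ilk_edp_underrun_wa(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	/* stop underrun reporting on the other pipe before the noisy step */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);

	/* ... enable the eDP PLL and the port; spurious underruns land here ... */

	/* let the last underrun drain, then re-arm reporting */
	intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
	intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
}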
*/ -static uint8_t +uint8_t intel_dp_voltage_max(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); @@ -3079,7 +3216,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp) return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; } -static uint8_t +uint8_t intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) { struct drm_device *dev = intel_dp_to_dev(intel_dp); @@ -3421,38 +3558,6 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp) return 0; } -static void -intel_get_adjust_train(struct intel_dp *intel_dp, - const uint8_t link_status[DP_LINK_STATUS_SIZE]) -{ - uint8_t v = 0; - uint8_t p = 0; - int lane; - uint8_t voltage_max; - uint8_t preemph_max; - - for (lane = 0; lane < intel_dp->lane_count; lane++) { - uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane); - uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); - - if (this_v > v) - v = this_v; - if (this_p > p) - p = this_p; - } - - voltage_max = intel_dp_voltage_max(intel_dp); - if (v >= voltage_max) - v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; - - preemph_max = intel_dp_pre_emphasis_max(intel_dp, v); - if (p >= preemph_max) - p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; - - for (lane = 0; lane < 4; lane++) - intel_dp->train_set[lane] = v | p; -} - static uint32_t gen4_signal_levels(uint8_t train_set) { @@ -3550,13 +3655,13 @@ gen7_edp_signal_levels(uint8_t train_set) } } -/* Properly updates "DP" with the correct signal levels. */ -static void -intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP) +void +intel_dp_set_signal_levels(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); enum port port = intel_dig_port->port; struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t signal_levels, mask = 0; uint8_t train_set = intel_dp->train_set[0]; @@ -3591,74 +3696,27 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP) (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT); - *DP = (*DP & ~mask) | signal_levels; -} - -static bool -intel_dp_set_link_train(struct intel_dp *intel_dp, - uint32_t *DP, - uint8_t dp_train_pat) -{ - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = - to_i915(intel_dig_port->base.base.dev); - uint8_t buf[sizeof(intel_dp->train_set) + 1]; - int ret, len; - - _intel_dp_set_link_train(intel_dp, DP, dp_train_pat); + intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels; - I915_WRITE(intel_dp->output_reg, *DP); + I915_WRITE(intel_dp->output_reg, intel_dp->DP); POSTING_READ(intel_dp->output_reg); - - buf[0] = dp_train_pat; - if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) == - DP_TRAINING_PATTERN_DISABLE) { - /* don't write DP_TRAINING_LANEx_SET on disable */ - len = 1; - } else { - /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */ - memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count); - len = intel_dp->lane_count + 1; - } - - ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET, - buf, len); - - return ret == len; } -static bool -intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP, - uint8_t dp_train_pat) -{ - if (!intel_dp->train_set_valid) - memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set)); - intel_dp_set_signal_levels(intel_dp, DP); - return intel_dp_set_link_train(intel_dp, DP, dp_train_pat); -} - -static bool -intel_dp_update_link_train(struct intel_dp 
*intel_dp, uint32_t *DP, - const uint8_t link_status[DP_LINK_STATUS_SIZE]) +void +intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, + uint8_t dp_train_pat) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); - int ret; - intel_get_adjust_train(intel_dp, link_status); - intel_dp_set_signal_levels(intel_dp, DP); + _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat); - I915_WRITE(intel_dp->output_reg, *DP); + I915_WRITE(intel_dp->output_reg, intel_dp->DP); POSTING_READ(intel_dp->output_reg); - - ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET, - intel_dp->train_set, intel_dp->lane_count); - - return ret == intel_dp->lane_count; } -static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) +void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; @@ -3689,232 +3747,6 @@ static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) DRM_ERROR("Timed out waiting for DP idle patterns\n"); } -/* Enable corresponding port and start training pattern 1 */ -static void -intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) -{ - struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base; - struct drm_device *dev = encoder->dev; - int i; - uint8_t voltage; - int voltage_tries, loop_tries; - uint32_t DP = intel_dp->DP; - uint8_t link_config[2]; - uint8_t link_bw, rate_select; - - if (HAS_DDI(dev)) - intel_ddi_prepare_link_retrain(encoder); - - intel_dp_compute_rate(intel_dp, intel_dp->link_rate, - &link_bw, &rate_select); - - /* Write the link configuration data */ - link_config[0] = link_bw; - link_config[1] = intel_dp->lane_count; - if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) - link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; - drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2); - if (intel_dp->num_sink_rates) - drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET, - &rate_select, 1); - - link_config[0] = 0; - link_config[1] = DP_SET_ANSI_8B10B; - drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2); - - DP |= DP_PORT_EN; - - /* clock recovery */ - if (!intel_dp_reset_link_train(intel_dp, &DP, - DP_TRAINING_PATTERN_1 | - DP_LINK_SCRAMBLING_DISABLE)) { - DRM_ERROR("failed to enable link training\n"); - return; - } - - voltage = 0xff; - voltage_tries = 0; - loop_tries = 0; - for (;;) { - uint8_t link_status[DP_LINK_STATUS_SIZE]; - - drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd); - if (!intel_dp_get_link_status(intel_dp, link_status)) { - DRM_ERROR("failed to get link status\n"); - break; - } - - if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { - DRM_DEBUG_KMS("clock recovery OK\n"); - break; - } - - /* - * if we used previously trained voltage and pre-emphasis values - * and we don't get clock recovery, reset link training values - */ - if (intel_dp->train_set_valid) { - DRM_DEBUG_KMS("clock recovery not ok, reset"); - /* clear the flag as we are not reusing train set */ - intel_dp->train_set_valid = false; - if (!intel_dp_reset_link_train(intel_dp, &DP, - DP_TRAINING_PATTERN_1 | - DP_LINK_SCRAMBLING_DISABLE)) { - DRM_ERROR("failed to enable link training\n"); - return; - } - continue; - } - - /* Check to see if we've tried the max voltage */ - for (i = 0; i < intel_dp->lane_count; i++) - if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) 
== 0) - break; - if (i == intel_dp->lane_count) { - ++loop_tries; - if (loop_tries == 5) { - DRM_ERROR("too many full retries, give up\n"); - break; - } - intel_dp_reset_link_train(intel_dp, &DP, - DP_TRAINING_PATTERN_1 | - DP_LINK_SCRAMBLING_DISABLE); - voltage_tries = 0; - continue; - } - - /* Check to see if we've tried the same voltage 5 times */ - if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { - ++voltage_tries; - if (voltage_tries == 5) { - DRM_ERROR("too many voltage retries, give up\n"); - break; - } - } else - voltage_tries = 0; - voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; - - /* Update training set as requested by target */ - if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) { - DRM_ERROR("failed to update link training\n"); - break; - } - } - - intel_dp->DP = DP; -} - -static void -intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) -{ - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = dig_port->base.base.dev; - bool channel_eq = false; - int tries, cr_tries; - uint32_t DP = intel_dp->DP; - uint32_t training_pattern = DP_TRAINING_PATTERN_2; - - /* - * Training Pattern 3 for HBR2 or 1.2 devices that support it. - * - * Intel platforms that support HBR2 also support TPS3. TPS3 support is - * also mandatory for downstream devices that support HBR2. - * - * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is - * supported but still not enabled. - */ - if (intel_dp_source_supports_hbr2(dev) && - drm_dp_tps3_supported(intel_dp->dpcd)) - training_pattern = DP_TRAINING_PATTERN_3; - else if (intel_dp->link_rate == 540000) - DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n"); - - /* channel equalization */ - if (!intel_dp_set_link_train(intel_dp, &DP, - training_pattern | - DP_LINK_SCRAMBLING_DISABLE)) { - DRM_ERROR("failed to start channel equalization\n"); - return; - } - - tries = 0; - cr_tries = 0; - channel_eq = false; - for (;;) { - uint8_t link_status[DP_LINK_STATUS_SIZE]; - - if (cr_tries > 5) { - DRM_ERROR("failed to train DP, aborting\n"); - break; - } - - drm_dp_link_train_channel_eq_delay(intel_dp->dpcd); - if (!intel_dp_get_link_status(intel_dp, link_status)) { - DRM_ERROR("failed to get link status\n"); - break; - } - - /* Make sure clock is still ok */ - if (!drm_dp_clock_recovery_ok(link_status, - intel_dp->lane_count)) { - intel_dp->train_set_valid = false; - intel_dp_link_training_clock_recovery(intel_dp); - intel_dp_set_link_train(intel_dp, &DP, - training_pattern | - DP_LINK_SCRAMBLING_DISABLE); - cr_tries++; - continue; - } - - if (drm_dp_channel_eq_ok(link_status, - intel_dp->lane_count)) { - channel_eq = true; - break; - } - - /* Try 5 times, then try clock recovery if that fails */ - if (tries > 5) { - intel_dp->train_set_valid = false; - intel_dp_link_training_clock_recovery(intel_dp); - intel_dp_set_link_train(intel_dp, &DP, - training_pattern | - DP_LINK_SCRAMBLING_DISABLE); - tries = 0; - cr_tries++; - continue; - } - - /* Update training set as requested by target */ - if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) { - DRM_ERROR("failed to update link training\n"); - break; - } - ++tries; - } - - intel_dp_set_idle_link_train(intel_dp); - - intel_dp->DP = DP; - - if (channel_eq) { - intel_dp->train_set_valid = true; - DRM_DEBUG_KMS("Channel EQ done. 
DP Training successful\n"); - } -} - -void intel_dp_stop_link_train(struct intel_dp *intel_dp) -{ - intel_dp_set_link_train(intel_dp, &intel_dp->DP, - DP_TRAINING_PATTERN_DISABLE); -} - -void -intel_dp_start_link_train(struct intel_dp *intel_dp) -{ - intel_dp_link_training_clock_recovery(intel_dp); - intel_dp_link_training_channel_equalization(intel_dp); -} - static void intel_dp_link_down(struct intel_dp *intel_dp) { @@ -3957,6 +3789,13 @@ intel_dp_link_down(struct intel_dp *intel_dp) * matching HDMI port to be enabled on transcoder A. */ if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) { + /* + * We get CPU/PCH FIFO underruns on the other pipe when + * doing the workaround. Sweep them under the rug. + */ + intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false); + intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); + /* always enable with pattern 1 (as per spec) */ DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK); DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1; @@ -3966,9 +3805,15 @@ intel_dp_link_down(struct intel_dp *intel_dp) DP &= ~DP_PORT_EN; I915_WRITE(intel_dp->output_reg, DP); POSTING_READ(intel_dp->output_reg); + + intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A); + intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); + intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); } msleep(intel_dp->panel_power_down_delay); + + intel_dp->DP = DP; } static bool @@ -4016,7 +3861,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) } DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n", - yesno(intel_dp_source_supports_hbr2(dev)), + yesno(intel_dp_source_supports_hbr2(intel_dp)), yesno(drm_dp_tps3_supported(intel_dp->dpcd))); /* Intermediate frequency support */ @@ -4106,9 +3951,12 @@ intel_dp_probe_mst(struct intel_dp *intel_dp) static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = dig_port->base.base.dev; struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc); u8 buf; int ret = 0; + int count = 0; + int attempts = 10; if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) { DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n"); @@ -4123,7 +3971,22 @@ static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp) goto out; } - intel_dp->sink_crc.started = false; + do { + intel_wait_for_vblank(dev, intel_crtc->pipe); + + if (drm_dp_dpcd_readb(&intel_dp->aux, + DP_TEST_SINK_MISC, &buf) < 0) { + ret = -EIO; + goto out; + } + count = buf & DP_TEST_COUNT_MASK; + } while (--attempts && count); + + if (attempts == 0) { + DRM_ERROR("TIMEOUT: Sink CRC counter is not zeroed\n"); + ret = -ETIMEDOUT; + } + out: hsw_enable_ips(intel_crtc); return ret; @@ -4132,27 +3995,26 @@ static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp) static int intel_dp_sink_crc_start(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = dig_port->base.base.dev; struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc); u8 buf; int ret; - if (intel_dp->sink_crc.started) { - ret = intel_dp_sink_crc_stop(intel_dp); - if (ret) - return ret; - } - if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) return -EIO; if (!(buf & DP_TEST_CRC_SUPPORTED)) return -ENOTTY; - intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK; - if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) return -EIO; + if (buf & DP_TEST_SINK_START) { + ret = 
intel_dp_sink_crc_stop(intel_dp); + if (ret) + return ret; + } + hsw_disable_ips(intel_crtc); if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, @@ -4161,7 +4023,7 @@ static int intel_dp_sink_crc_start(struct intel_dp *intel_dp) return -EIO; } - intel_dp->sink_crc.started = true; + intel_wait_for_vblank(dev, intel_crtc->pipe); return 0; } @@ -4173,7 +4035,6 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc) u8 buf; int count, ret; int attempts = 6; - bool old_equal_new; ret = intel_dp_sink_crc_start(intel_dp); if (ret) @@ -4189,35 +4050,17 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc) } count = buf & DP_TEST_COUNT_MASK; - /* - * Count might be reset during the loop. In this case - * last known count needs to be reset as well. - */ - if (count == 0) - intel_dp->sink_crc.last_count = 0; - - if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) { - ret = -EIO; - goto stop; - } - - old_equal_new = (count == intel_dp->sink_crc.last_count && - !memcmp(intel_dp->sink_crc.last_crc, crc, - 6 * sizeof(u8))); - - } while (--attempts && (count == 0 || old_equal_new)); - - intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK; - memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8)); + } while (--attempts && count == 0); if (attempts == 0) { - if (old_equal_new) { - DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n"); - } else { - DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n"); - ret = -ETIMEDOUT; - goto stop; - } + DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n"); + ret = -ETIMEDOUT; + goto stop; + } + + if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) { + ret = -EIO; + goto stop; } stop: @@ -4317,13 +4160,6 @@ static void intel_dp_handle_test_request(struct intel_dp *intel_dp) uint8_t rxdata = 0; int status = 0; - intel_dp->compliance_test_active = 0; - intel_dp->compliance_test_type = 0; - intel_dp->compliance_test_data = 0; - - intel_dp->aux.i2c_nack_count = 0; - intel_dp->aux.i2c_defer_count = 0; - status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1); if (status <= 0) { DRM_DEBUG_KMS("Could not read test request from sink\n"); @@ -4439,6 +4275,14 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); + /* + * Clearing compliance test variables to allow capturing + * of values for next automated test request. 
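/*
 * For context: the compliance_test_* fields cleared above are filled in via
 * intel_dp_handle_test_request(), which begins by reading the sink's
 * DP_TEST_REQUEST DPCD register. A minimal sketch of that read, with error
 * handling reduced to returning 0 (the helper name is illustrative):
 */
static uint8_t read_test_request(struct intel_dp *intel_dp)
{
	uint8_t rxdata = 0;

	/* DP_TEST_REQUEST is the standard DPCD test-request register */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1) <= 0)
		return 0;

	return rxdata;
}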
+ */ + intel_dp->compliance_test_active = 0; + intel_dp->compliance_test_type = 0; + intel_dp->compliance_test_data = 0; + if (!intel_encoder->base.crtc) return; @@ -4469,7 +4313,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); } - if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) { + /* if link training is requested we should perform it always */ + if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) || + (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) { DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", intel_encoder->base.name); intel_dp_start_link_train(intel_dp); @@ -4687,41 +4533,6 @@ bool intel_digital_port_connected(struct drm_i915_private *dev_priv, return g4x_digital_port_connected(dev_priv, port); } -static enum drm_connector_status -ironlake_dp_detect(struct intel_dp *intel_dp) -{ - struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - - if (!intel_digital_port_connected(dev_priv, intel_dig_port)) - return connector_status_disconnected; - - return intel_dp_detect_dpcd(intel_dp); -} - -static enum drm_connector_status -g4x_dp_detect(struct intel_dp *intel_dp) -{ - struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - - /* Can't disconnect eDP, but you can close the lid... */ - if (is_edp(intel_dp)) { - enum drm_connector_status status; - - status = intel_panel_detect(dev); - if (status == connector_status_unknown) - status = connector_status_connected; - return status; - } - - if (!intel_digital_port_connected(dev->dev_private, intel_dig_port)) - return connector_status_disconnected; - - return intel_dp_detect_dpcd(intel_dp); -} - static struct edid * intel_dp_get_edid(struct intel_dp *intel_dp) { @@ -4765,26 +4576,6 @@ intel_dp_unset_edid(struct intel_dp *intel_dp) intel_dp->has_audio = false; } -static enum intel_display_power_domain -intel_dp_power_get(struct intel_dp *dp) -{ - struct intel_encoder *encoder = &dp_to_dig_port(dp)->base; - enum intel_display_power_domain power_domain; - - power_domain = intel_display_port_power_domain(encoder); - intel_display_power_get(to_i915(encoder->base.dev), power_domain); - - return power_domain; -} - -static void -intel_dp_power_put(struct intel_dp *dp, - enum intel_display_power_domain power_domain) -{ - struct intel_encoder *encoder = &dp_to_dig_port(dp)->base; - intel_display_power_put(to_i915(encoder->base.dev), power_domain); -} - static enum drm_connector_status intel_dp_detect(struct drm_connector *connector, bool force) { @@ -4808,17 +4599,25 @@ intel_dp_detect(struct drm_connector *connector, bool force) return connector_status_disconnected; } - power_domain = intel_dp_power_get(intel_dp); + power_domain = intel_display_port_aux_power_domain(intel_encoder); + intel_display_power_get(to_i915(dev), power_domain); /* Can't disconnect eDP, but you can close the lid... 
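/*
 * For context: with intel_dp_power_get()/intel_dp_power_put() removed above,
 * detection now brackets its DPCD and EDID traffic with the AUX power domain
 * directly. The pattern, in sketch form (the wrapper function is hypothetical;
 * the calls mirror intel_dp_detect() and intel_dp_force()):
 */
static void detect_with_aux_power(struct intel_encoder *intel_encoder,
				  struct drm_i915_private *dev_priv)
{
	enum intel_display_power_domain power_domain;

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	/* ... DPCD reads, EDID probe ... */

	intel_display_power_put(dev_priv, power_domain);
}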
*/ if (is_edp(intel_dp)) status = edp_detect(intel_dp); - else if (HAS_PCH_SPLIT(dev)) - status = ironlake_dp_detect(intel_dp); + else if (intel_digital_port_connected(to_i915(dev), + dp_to_dig_port(intel_dp))) + status = intel_dp_detect_dpcd(intel_dp); else - status = g4x_dp_detect(intel_dp); - if (status != connector_status_connected) + status = connector_status_disconnected; + + if (status != connector_status_connected) { + intel_dp->compliance_test_active = 0; + intel_dp->compliance_test_type = 0; + intel_dp->compliance_test_data = 0; + goto out; + } intel_dp_probe_oui(intel_dp); @@ -4832,6 +4631,14 @@ intel_dp_detect(struct drm_connector *connector, bool force) goto out; } + /* + * Clearing NACK and defer counts to get their exact values + * while reading EDID which are required by Compliance tests + * 4.2.2.4 and 4.2.2.5 + */ + intel_dp->aux.i2c_nack_count = 0; + intel_dp->aux.i2c_defer_count = 0; + intel_dp_set_edid(intel_dp); if (intel_encoder->type != INTEL_OUTPUT_EDP) @@ -4853,7 +4660,7 @@ intel_dp_detect(struct drm_connector *connector, bool force) } out: - intel_dp_power_put(intel_dp, power_domain); + intel_display_power_put(to_i915(dev), power_domain); return status; } @@ -4862,6 +4669,7 @@ intel_dp_force(struct drm_connector *connector) { struct intel_dp *intel_dp = intel_attached_dp(connector); struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; + struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); enum intel_display_power_domain power_domain; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", @@ -4871,11 +4679,12 @@ intel_dp_force(struct drm_connector *connector) if (connector->status != connector_status_connected) return; - power_domain = intel_dp_power_get(intel_dp); + power_domain = intel_display_port_aux_power_domain(intel_encoder); + intel_display_power_get(dev_priv, power_domain); intel_dp_set_edid(intel_dp); - intel_dp_power_put(intel_dp, power_domain); + intel_display_power_put(dev_priv, power_domain); if (intel_encoder->type != INTEL_OUTPUT_EDP) intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; @@ -5034,7 +4843,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_dp *intel_dp = &intel_dig_port->dp; - drm_dp_aux_unregister(&intel_dp->aux); + intel_dp_aux_fini(intel_dp); intel_dp_mst_encoder_cleanup(intel_dig_port); if (is_edp(intel_dp)) { cancel_delayed_work_sync(&intel_dp->panel_vdd_work); @@ -5091,7 +4900,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) * indefinitely. */ DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n"); - power_domain = intel_display_port_power_domain(&intel_dig_port->base); + power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base); intel_display_power_get(dev_priv, power_domain); edp_panel_vdd_schedule_off(intel_dp); @@ -5153,7 +4962,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) enum intel_display_power_domain power_domain; enum irqreturn ret = IRQ_NONE; - if (intel_dig_port->base.type != INTEL_OUTPUT_EDP) + if (intel_dig_port->base.type != INTEL_OUTPUT_EDP && + intel_dig_port->base.type != INTEL_OUTPUT_HDMI) intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT; if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) { @@ -5172,7 +4982,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) port_name(intel_dig_port->port), long_hpd ? 
"long" : "short"); - power_domain = intel_display_port_power_domain(intel_encoder); + power_domain = intel_display_port_aux_power_domain(intel_encoder); intel_display_power_get(dev_priv, power_domain); if (long_hpd) { @@ -5223,25 +5033,6 @@ put_power: return ret; } -/* Return which DP Port should be selected for Transcoder DP control */ -int -intel_trans_dp_port_sel(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct intel_encoder *intel_encoder; - struct intel_dp *intel_dp; - - for_each_encoder_on_crtc(dev, crtc, intel_encoder) { - intel_dp = enc_to_intel_dp(&intel_encoder->base); - - if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || - intel_encoder->type == INTEL_OUTPUT_EDP) - return intel_dp->output_reg; - } - - return -1; -} - /* check the VBT to see whether the eDP is on another port */ bool intel_dp_is_edp(struct drm_device *dev, enum port port) { @@ -5313,7 +5104,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, struct edp_power_seq cur, vbt, spec, *final = &intel_dp->pps_delays; u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0; - int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0; + i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg; lockdep_assert_held(&dev_priv->pps_mutex); @@ -5435,7 +5226,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, struct drm_i915_private *dev_priv = dev->dev_private; u32 pp_on, pp_off, pp_div, port_sel = 0; int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev); - int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg; + i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg; enum port port = dp_to_dig_port(intel_dp)->port; const struct edp_power_seq *seq = &intel_dp->pps_delays; @@ -5597,7 +5388,7 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate) DRM_ERROR("Unsupported refreshrate type\n"); } } else if (INTEL_INFO(dev)->gen > 6) { - u32 reg = PIPECONF(intel_crtc->config->cpu_transcoder); + i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder); u32 val; val = I915_READ(reg); @@ -6015,7 +5806,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; enum port port = intel_dig_port->port; - int type; + int type, ret; intel_dp->pps_pipe = INVALID_PIPE; @@ -6036,6 +5827,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, else intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl; + if (HAS_DDI(dev)) + intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain; + /* Preserve the current hw state. 
*/ intel_dp->DP = I915_READ(intel_dp->output_reg); intel_dp->attached_connector = intel_connector; @@ -6087,7 +5881,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, break; case PORT_B: intel_encoder->hpd_pin = HPD_PORT_B; - if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0)) + if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) intel_encoder->hpd_pin = HPD_PORT_A; break; case PORT_C: @@ -6113,7 +5907,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, pps_unlock(intel_dp); } - intel_dp_aux_init(intel_dp, intel_connector); + ret = intel_dp_aux_init(intel_dp, intel_connector); + if (ret) + goto fail; /* init MST on ports that can support it */ if (HAS_DP_MST(dev) && @@ -6122,20 +5918,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, intel_connector->base.base.id); if (!intel_edp_init_connector(intel_dp, intel_connector)) { - drm_dp_aux_unregister(&intel_dp->aux); - if (is_edp(intel_dp)) { - cancel_delayed_work_sync(&intel_dp->panel_vdd_work); - /* - * vdd might still be enabled do to the delayed vdd off. - * Make sure vdd is actually turned off here. - */ - pps_lock(intel_dp); - edp_panel_vdd_off_sync(intel_dp); - pps_unlock(intel_dp); - } - drm_connector_unregister(connector); - drm_connector_cleanup(connector); - return false; + intel_dp_aux_fini(intel_dp); + intel_dp_mst_encoder_cleanup(intel_dig_port); + goto fail; } intel_dp_add_properties(intel_dp, connector); @@ -6152,10 +5937,27 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, i915_debugfs_connector_add(connector); return true; + +fail: + if (is_edp(intel_dp)) { + cancel_delayed_work_sync(&intel_dp->panel_vdd_work); + /* + * vdd might still be enabled due to the delayed vdd off. + * Make sure vdd is actually turned off here. + */ + pps_lock(intel_dp); + edp_panel_vdd_off_sync(intel_dp); + pps_unlock(intel_dp); + } + drm_connector_unregister(connector); + drm_connector_cleanup(connector); + + return false; } void -intel_dp_init(struct drm_device *dev, int output_reg, enum port port) +intel_dp_init(struct drm_device *dev, + i915_reg_t output_reg, enum port port) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_digital_port *intel_dig_port; @@ -6201,6 +6003,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port) } intel_dig_port->port = port; + dev_priv->dig_port_map[port] = intel_encoder; intel_dig_port->dp.output_reg = output_reg; intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c new file mode 100644 index 000000000000..88887938e0bf --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dp_link_training.c @@ -0,0 +1,323 @@ +/* + * Copyright © 2008-2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software.
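/*
 * For context: intel_dp_init() above now takes i915_reg_t rather than int,
 * part of the register type-safety conversion seen throughout this diff.
 * Wrapping the mmio offset in a one-member struct turns any accidental mix-up
 * of register offsets and register values into a compile error. A sketch of
 * the idea; the real definition lives in i915_reg.h:
 */
typedef struct {
	uint32_t reg;	/* raw mmio offset, only reachable via the accessor */
} i915_reg_t;

static inline uint32_t i915_mmio_reg_offset(i915_reg_t reg)
{
	return reg.reg;
}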
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "intel_drv.h" + +static void +intel_get_adjust_train(struct intel_dp *intel_dp, + const uint8_t link_status[DP_LINK_STATUS_SIZE]) +{ + uint8_t v = 0; + uint8_t p = 0; + int lane; + uint8_t voltage_max; + uint8_t preemph_max; + + for (lane = 0; lane < intel_dp->lane_count; lane++) { + uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane); + uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); + + if (this_v > v) + v = this_v; + if (this_p > p) + p = this_p; + } + + voltage_max = intel_dp_voltage_max(intel_dp); + if (v >= voltage_max) + v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; + + preemph_max = intel_dp_pre_emphasis_max(intel_dp, v); + if (p >= preemph_max) + p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + + for (lane = 0; lane < 4; lane++) + intel_dp->train_set[lane] = v | p; +} + +static bool +intel_dp_set_link_train(struct intel_dp *intel_dp, + uint8_t dp_train_pat) +{ + uint8_t buf[sizeof(intel_dp->train_set) + 1]; + int ret, len; + + intel_dp_program_link_training_pattern(intel_dp, dp_train_pat); + + buf[0] = dp_train_pat; + if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) == + DP_TRAINING_PATTERN_DISABLE) { + /* don't write DP_TRAINING_LANEx_SET on disable */ + len = 1; + } else { + /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */ + memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count); + len = intel_dp->lane_count + 1; + } + + ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET, + buf, len); + + return ret == len; +} + +static bool +intel_dp_reset_link_train(struct intel_dp *intel_dp, + uint8_t dp_train_pat) +{ + if (!intel_dp->train_set_valid) + memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set)); + intel_dp_set_signal_levels(intel_dp); + return intel_dp_set_link_train(intel_dp, dp_train_pat); +} + +static bool +intel_dp_update_link_train(struct intel_dp *intel_dp) +{ + int ret; + + intel_dp_set_signal_levels(intel_dp); + + ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET, + intel_dp->train_set, intel_dp->lane_count); + + return ret == intel_dp->lane_count; +} + +/* Enable corresponding port and start training pattern 1 */ +static void +intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) +{ + int i; + uint8_t voltage; + int voltage_tries, loop_tries; + uint8_t link_config[2]; + uint8_t link_bw, rate_select; + + if (intel_dp->prepare_link_retrain) + intel_dp->prepare_link_retrain(intel_dp); + + intel_dp_compute_rate(intel_dp, intel_dp->link_rate, + &link_bw, &rate_select); + + /* Write the link configuration data */ + link_config[0] = link_bw; + link_config[1] = intel_dp->lane_count; + if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) + link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2); + if (intel_dp->num_sink_rates) + drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET, + &rate_select, 1); + + link_config[0] = 0; + link_config[1] = DP_SET_ANSI_8B10B; + drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, 
link_config, 2); + + intel_dp->DP |= DP_PORT_EN; + + /* clock recovery */ + if (!intel_dp_reset_link_train(intel_dp, + DP_TRAINING_PATTERN_1 | + DP_LINK_SCRAMBLING_DISABLE)) { + DRM_ERROR("failed to enable link training\n"); + return; + } + + voltage = 0xff; + voltage_tries = 0; + loop_tries = 0; + for (;;) { + uint8_t link_status[DP_LINK_STATUS_SIZE]; + + drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd); + if (!intel_dp_get_link_status(intel_dp, link_status)) { + DRM_ERROR("failed to get link status\n"); + break; + } + + if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { + DRM_DEBUG_KMS("clock recovery OK\n"); + break; + } + + /* + * if we used previously trained voltage and pre-emphasis values + * and we don't get clock recovery, reset link training values + */ + if (intel_dp->train_set_valid) { + DRM_DEBUG_KMS("clock recovery not ok, reset"); + /* clear the flag as we are not reusing train set */ + intel_dp->train_set_valid = false; + if (!intel_dp_reset_link_train(intel_dp, + DP_TRAINING_PATTERN_1 | + DP_LINK_SCRAMBLING_DISABLE)) { + DRM_ERROR("failed to enable link training\n"); + return; + } + continue; + } + + /* Check to see if we've tried the max voltage */ + for (i = 0; i < intel_dp->lane_count; i++) + if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) + break; + if (i == intel_dp->lane_count) { + ++loop_tries; + if (loop_tries == 5) { + DRM_ERROR("too many full retries, give up\n"); + break; + } + intel_dp_reset_link_train(intel_dp, + DP_TRAINING_PATTERN_1 | + DP_LINK_SCRAMBLING_DISABLE); + voltage_tries = 0; + continue; + } + + /* Check to see if we've tried the same voltage 5 times */ + if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { + ++voltage_tries; + if (voltage_tries == 5) { + DRM_ERROR("too many voltage retries, give up\n"); + break; + } + } else + voltage_tries = 0; + voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; + + /* Update training set as requested by target */ + intel_get_adjust_train(intel_dp, link_status); + if (!intel_dp_update_link_train(intel_dp)) { + DRM_ERROR("failed to update link training\n"); + break; + } + } +} + +static void +intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) +{ + bool channel_eq = false; + int tries, cr_tries; + uint32_t training_pattern = DP_TRAINING_PATTERN_2; + + /* + * Training Pattern 3 for HBR2 or 1.2 devices that support it. + * + * Intel platforms that support HBR2 also support TPS3. TPS3 support is + * also mandatory for downstream devices that support HBR2. + * + * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is + * supported but still not enabled. 
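/*
 * For context: the pattern selection below reduces to "use TPS3 whenever both
 * source and sink support it, otherwise fall back to TPS2". As a standalone
 * sketch built from the helpers this patch exports (the function name is
 * illustrative):
 */
static uint32_t select_eq_training_pattern(struct intel_dp *intel_dp)
{
	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		return DP_TRAINING_PATTERN_3;

	return DP_TRAINING_PATTERN_2;
}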
+ */ + if (intel_dp_source_supports_hbr2(intel_dp) && + drm_dp_tps3_supported(intel_dp->dpcd)) + training_pattern = DP_TRAINING_PATTERN_3; + else if (intel_dp->link_rate == 540000) + DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n"); + + /* channel equalization */ + if (!intel_dp_set_link_train(intel_dp, + training_pattern | + DP_LINK_SCRAMBLING_DISABLE)) { + DRM_ERROR("failed to start channel equalization\n"); + return; + } + + tries = 0; + cr_tries = 0; + channel_eq = false; + for (;;) { + uint8_t link_status[DP_LINK_STATUS_SIZE]; + + if (cr_tries > 5) { + DRM_ERROR("failed to train DP, aborting\n"); + break; + } + + drm_dp_link_train_channel_eq_delay(intel_dp->dpcd); + if (!intel_dp_get_link_status(intel_dp, link_status)) { + DRM_ERROR("failed to get link status\n"); + break; + } + + /* Make sure clock is still ok */ + if (!drm_dp_clock_recovery_ok(link_status, + intel_dp->lane_count)) { + intel_dp->train_set_valid = false; + intel_dp_link_training_clock_recovery(intel_dp); + intel_dp_set_link_train(intel_dp, + training_pattern | + DP_LINK_SCRAMBLING_DISABLE); + cr_tries++; + continue; + } + + if (drm_dp_channel_eq_ok(link_status, + intel_dp->lane_count)) { + channel_eq = true; + break; + } + + /* Try 5 times, then try clock recovery if that fails */ + if (tries > 5) { + intel_dp->train_set_valid = false; + intel_dp_link_training_clock_recovery(intel_dp); + intel_dp_set_link_train(intel_dp, + training_pattern | + DP_LINK_SCRAMBLING_DISABLE); + tries = 0; + cr_tries++; + continue; + } + + /* Update training set as requested by target */ + intel_get_adjust_train(intel_dp, link_status); + if (!intel_dp_update_link_train(intel_dp)) { + DRM_ERROR("failed to update link training\n"); + break; + } + ++tries; + } + + intel_dp_set_idle_link_train(intel_dp); + + if (channel_eq) { + intel_dp->train_set_valid = true; + DRM_DEBUG_KMS("Channel EQ done. 
DP Training successful\n"); + } +} + +void intel_dp_stop_link_train(struct intel_dp *intel_dp) +{ + intel_dp_set_link_train(intel_dp, + DP_TRAINING_PATTERN_DISABLE); +} + +void +intel_dp_start_link_train(struct intel_dp *intel_dp) +{ + intel_dp_link_training_clock_recovery(intel_dp); + intel_dp_link_training_channel_equalization(intel_dp); +} diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 0639275fc471..8c4e7dfe304c 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -173,20 +173,14 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder) intel_mst->port = found->port; if (intel_dp->active_mst_links == 0) { - enum port port = intel_ddi_get_encoder_port(encoder); + intel_ddi_clk_select(encoder, intel_crtc->config); intel_dp_set_link_params(intel_dp, intel_crtc->config); - /* FIXME: add support for SKL */ - if (INTEL_INFO(dev)->gen < 9) - I915_WRITE(PORT_CLK_SEL(port), - intel_crtc->config->ddi_pll_sel); - intel_ddi_init_dp_buf_reg(&intel_dig_port->base); intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); - intel_dp_start_link_train(intel_dp); intel_dp_stop_link_train(intel_dp); } @@ -414,7 +408,10 @@ static void intel_connector_add_to_fbdev(struct intel_connector *connector) { #ifdef CONFIG_DRM_FBDEV_EMULATION struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, &connector->base); + + if (dev_priv->fbdev) + drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, + &connector->base); #endif } @@ -422,7 +419,10 @@ static void intel_connector_remove_from_fbdev(struct intel_connector *connector) { #ifdef CONFIG_DRM_FBDEV_EMULATION struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, &connector->base); + + if (dev_priv->fbdev) + drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, + &connector->base); #endif } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 0598932ce623..fe58a5722b16 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -123,8 +123,6 @@ struct intel_framebuffer { struct intel_fbdev { struct drm_fb_helper helper; struct intel_framebuffer *fb; - struct list_head fbdev_list; - struct drm_display_mode *our_mode; int preferred_bpp; }; @@ -250,6 +248,7 @@ struct intel_atomic_state { unsigned int cdclk; bool dpll_set; struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS]; + struct intel_wm_config wm_config; }; struct intel_plane_state { @@ -280,6 +279,9 @@ struct intel_plane_state { int scaler_id; struct drm_intel_sprite_colorkey ckey; + + /* async flip related structures */ + struct drm_i915_gem_request *wait_req; }; struct intel_initial_plane_config { @@ -334,6 +336,21 @@ struct intel_crtc_scaler_state { /* drm_mode->private_flags */ #define I915_MODE_FLAG_INHERITED 1 +struct intel_pipe_wm { + struct intel_wm_level wm[5]; + uint32_t linetime; + bool fbc_wm_enabled; + bool pipe_enabled; + bool sprites_enabled; + bool sprites_scaled; +}; + +struct skl_pipe_wm { + struct skl_wm_level wm[8]; + struct skl_wm_level trans_wm; + uint32_t linetime; +}; + struct intel_crtc_state { struct drm_crtc_state base; @@ -468,6 +485,20 @@ struct intel_crtc_state { /* w/a for waiting 2 vblanks during crtc enable */ enum pipe hsw_workaround_pipe; + + /* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */ + bool disable_lp_wm; + + struct { + /* + * optimal watermarks, 
programmed post-vblank when this state + * is committed + */ + union { + struct intel_pipe_wm ilk; + struct skl_pipe_wm skl; + } optimal; + } wm; }; struct vlv_wm_state { @@ -479,26 +510,12 @@ struct vlv_wm_state { bool cxsr; }; -struct intel_pipe_wm { - struct intel_wm_level wm[5]; - uint32_t linetime; - bool fbc_wm_enabled; - bool pipe_enabled; - bool sprites_enabled; - bool sprites_scaled; -}; - struct intel_mmio_flip { struct work_struct work; struct drm_i915_private *i915; struct drm_i915_gem_request *req; struct intel_crtc *crtc; -}; - -struct skl_pipe_wm { - struct skl_wm_level wm[8]; - struct skl_wm_level trans_wm; - uint32_t linetime; + unsigned int rotation; }; /* @@ -509,13 +526,11 @@ struct skl_pipe_wm { */ struct intel_crtc_atomic_commit { /* Sleepable operations to perform before commit */ - bool wait_for_flips; bool disable_fbc; bool disable_ips; bool disable_cxsr; bool pre_disable_primary; bool update_wm_pre, update_wm_post; - unsigned disabled_planes; /* Sleepable operations to perform after commit */ unsigned fb_bits; @@ -568,9 +583,10 @@ struct intel_crtc { /* per-pipe watermark state */ struct { /* watermarks currently being used */ - struct intel_pipe_wm active; - /* SKL wm values currently in use */ - struct skl_pipe_wm skl_active; + union { + struct intel_pipe_wm ilk; + struct skl_pipe_wm skl; + } active; /* allow CxSR on this pipe */ bool cxsr_allowed; } wm; @@ -678,7 +694,7 @@ struct cxsr_latency { #define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL) struct intel_hdmi { - u32 hdmi_reg; + i915_reg_t hdmi_reg; int ddc_bus; bool limited_color_range; bool color_range_auto; @@ -720,15 +736,10 @@ enum link_m_n_set { M2_N2 }; -struct sink_crc { - bool started; - u8 last_crc[6]; - int last_count; -}; - struct intel_dp { - uint32_t output_reg; - uint32_t aux_ch_ctl_reg; + i915_reg_t output_reg; + i915_reg_t aux_ch_ctl_reg; + i915_reg_t aux_ch_data_reg[5]; uint32_t DP; int link_rate; uint8_t lane_count; @@ -742,7 +753,6 @@ struct intel_dp { /* sink rates as reported by DP_SUPPORTED_LINK_RATES */ uint8_t num_sink_rates; int sink_rates[DP_MAX_SUPPORTED_RATES]; - struct sink_crc sink_crc; struct drm_dp_aux aux; uint8_t train_set[4]; int panel_power_up_delay; @@ -784,6 +794,10 @@ struct intel_dp { bool has_aux_irq, int send_bytes, uint32_t aux_clock_divider); + + /* This is called before link training is started */ + void (*prepare_link_retrain)(struct intel_dp *intel_dp); + bool train_set_valid; /* Displayport compliance testing */ @@ -800,6 +814,8 @@ struct intel_digital_port { struct intel_hdmi hdmi; enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool); bool release_cl2_override; + /* for communication with audio component; protected by av_mutex */ + const struct drm_connector *audio_connector; }; struct intel_dp_mst_encoder { @@ -943,7 +959,8 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, enum pipe pipe); void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, enum transcoder pch_transcoder); -void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv); +void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv); +void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv); /* i915_irq.c */ void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); @@ -974,6 +991,8 @@ void intel_crt_init(struct drm_device *dev); /* intel_ddi.c */ +void intel_ddi_clk_select(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config); void
intel_prepare_ddi(struct drm_device *dev); void hsw_fdi_link_train(struct drm_crtc *crtc); void intel_ddi_init(struct drm_device *dev, enum port port); @@ -988,7 +1007,7 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc); bool intel_ddi_pll_select(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); void intel_ddi_set_pipe_settings(struct drm_crtc *crtc); -void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder); +void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp); bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); void intel_ddi_fdi_disable(struct drm_crtc *crtc); void intel_ddi_get_config(struct intel_encoder *encoder, @@ -1056,6 +1075,15 @@ intel_wait_for_vblank(struct drm_device *dev, int pipe) { drm_wait_one_vblank(dev, pipe); } +static inline void +intel_wait_for_vblank_if_active(struct drm_device *dev, int pipe) +{ + const struct intel_crtc *crtc = + to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe)); + + if (crtc->active) + intel_wait_for_vblank(dev, pipe); +} int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp); void vlv_wait_port_ready(struct drm_i915_private *dev_priv, struct intel_digital_port *dport, @@ -1069,9 +1097,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx); int intel_pin_and_fence_fb_obj(struct drm_plane *plane, struct drm_framebuffer *fb, - const struct drm_plane_state *plane_state, - struct intel_engine_cs *pipelined, - struct drm_i915_gem_request **pipelined_request); + const struct drm_plane_state *plane_state); struct drm_framebuffer * __intel_framebuffer_create(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd, @@ -1152,7 +1178,10 @@ void broxton_ddi_phy_uninit(struct drm_device *dev); void bxt_enable_dc9(struct drm_i915_private *dev_priv); void bxt_disable_dc9(struct drm_i915_private *dev_priv); void skl_init_cdclk(struct drm_i915_private *dev_priv); +int skl_sanitize_cdclk(struct drm_i915_private *dev_priv); void skl_uninit_cdclk(struct drm_i915_private *dev_priv); +void skl_enable_dc6(struct drm_i915_private *dev_priv); +void skl_disable_dc6(struct drm_i915_private *dev_priv); void intel_dp_get_m_n(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config); void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n); @@ -1169,33 +1198,30 @@ void hsw_enable_ips(struct intel_crtc *crtc); void hsw_disable_ips(struct intel_crtc *crtc); enum intel_display_power_domain intel_display_port_power_domain(struct intel_encoder *intel_encoder); +enum intel_display_power_domain +intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder); void intel_mode_from_pipe_config(struct drm_display_mode *mode, struct intel_crtc_state *pipe_config); -void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc); void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file); int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); -unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane, - struct drm_i915_gem_object *obj, - unsigned int plane); +u32 intel_plane_obj_offset(struct intel_plane *intel_plane, + struct drm_i915_gem_object *obj, + unsigned int plane); u32 skl_plane_ctl_format(uint32_t pixel_format); u32 skl_plane_ctl_tiling(uint64_t fb_modifier); u32 skl_plane_ctl_rotation(unsigned int rotation); /* intel_csr.c */ -void intel_csr_ucode_init(struct drm_device *dev); -enum 
csr_state intel_csr_load_status_get(struct drm_i915_private *dev_priv); -void intel_csr_load_status_set(struct drm_i915_private *dev_priv, - enum csr_state state); -void intel_csr_load_program(struct drm_device *dev); -void intel_csr_ucode_fini(struct drm_device *dev); -void assert_csr_loaded(struct drm_i915_private *dev_priv); +void intel_csr_ucode_init(struct drm_i915_private *); +void intel_csr_load_program(struct drm_i915_private *); +void intel_csr_ucode_fini(struct drm_i915_private *); /* intel_dp.c */ -void intel_dp_init(struct drm_device *dev, int output_reg, enum port port); +void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port); bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, struct intel_connector *intel_connector); void intel_dp_set_link_params(struct intel_dp *intel_dp, @@ -1233,6 +1259,22 @@ bool intel_digital_port_connected(struct drm_i915_private *dev_priv, struct intel_digital_port *port); void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config); +void +intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, + uint8_t dp_train_pat); +void +intel_dp_set_signal_levels(struct intel_dp *intel_dp); +void intel_dp_set_idle_link_train(struct intel_dp *intel_dp); +uint8_t +intel_dp_voltage_max(struct intel_dp *intel_dp); +uint8_t +intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing); +void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, + uint8_t *link_bw, uint8_t *rate_select); +bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp); +bool +intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]); + /* intel_dp_mst.c */ int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); @@ -1247,7 +1289,7 @@ void intel_dvo_init(struct drm_device *dev); /* legacy fbdev emulation in intel_fbdev.c */ #ifdef CONFIG_DRM_FBDEV_EMULATION extern int intel_fbdev_init(struct drm_device *dev); -extern void intel_fbdev_initial_config(void *data, async_cookie_t cookie); +extern void intel_fbdev_initial_config_async(struct drm_device *dev); extern void intel_fbdev_fini(struct drm_device *dev); extern void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous); extern void intel_fbdev_output_poll_changed(struct drm_device *dev); @@ -1258,7 +1300,7 @@ static inline int intel_fbdev_init(struct drm_device *dev) return 0; } -static inline void intel_fbdev_initial_config(void *data, async_cookie_t cookie) +static inline void intel_fbdev_initial_config_async(struct drm_device *dev) { } @@ -1286,11 +1328,10 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv, enum fb_op_origin origin); void intel_fbc_flush(struct drm_i915_private *dev_priv, unsigned int frontbuffer_bits, enum fb_op_origin origin); -const char *intel_no_fbc_reason_str(enum no_fbc_reason reason); void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv); /* intel_hdmi.c */ -void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port); +void intel_hdmi_init(struct drm_device *dev, i915_reg_t hdmi_reg, enum port port); void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, struct intel_connector *intel_connector); struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); @@ -1366,7 +1407,10 @@ void intel_psr_single_frame_update(struct drm_device *dev, /* intel_runtime_pm.c */ int intel_power_domains_init(struct 
drm_i915_private *); void intel_power_domains_fini(struct drm_i915_private *); -void intel_power_domains_init_hw(struct drm_i915_private *dev_priv); +void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume); +void intel_power_domains_suspend(struct drm_i915_private *dev_priv); +void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv); +void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv); void intel_runtime_pm_enable(struct drm_i915_private *dev_priv); bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, @@ -1377,8 +1421,6 @@ void intel_display_power_get(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); void intel_display_power_put(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); -void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv); -void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv); void intel_runtime_pm_get(struct drm_i915_private *dev_priv); void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv); void intel_runtime_pm_put(struct drm_i915_private *dev_priv); @@ -1396,12 +1438,6 @@ void intel_init_clock_gating(struct drm_device *dev); void intel_suspend_hw(struct drm_device *dev); int ilk_wm_max_level(const struct drm_device *dev); void intel_update_watermarks(struct drm_crtc *crtc); -void intel_update_sprite_watermarks(struct drm_plane *plane, - struct drm_crtc *crtc, - uint32_t sprite_width, - uint32_t sprite_height, - int pixel_size, - bool enabled, bool scaled); void intel_init_pm(struct drm_device *dev); void intel_pm_setup(struct drm_device *dev); void intel_gpu_ips_init(struct drm_i915_private *dev_priv); @@ -1429,7 +1465,8 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config); /* intel_sdvo.c */ -bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob); +bool intel_sdvo_init(struct drm_device *dev, + i915_reg_t reg, enum port port); /* intel_sprite.c */ diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index 170ae6f4866e..efb5a27dd49c 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -60,7 +60,8 @@ static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port) DRM_ERROR("DPI FIFOs are not empty\n"); } -static void write_data(struct drm_i915_private *dev_priv, u32 reg, +static void write_data(struct drm_i915_private *dev_priv, + i915_reg_t reg, const u8 *data, u32 len) { u32 i, j; @@ -75,7 +76,8 @@ static void write_data(struct drm_i915_private *dev_priv, u32 reg, } } -static void read_data(struct drm_i915_private *dev_priv, u32 reg, +static void read_data(struct drm_i915_private *dev_priv, + i915_reg_t reg, u8 *data, u32 len) { u32 i, j; @@ -98,7 +100,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host, struct mipi_dsi_packet packet; ssize_t ret; const u8 *header, *data; - u32 data_reg, data_mask, ctrl_reg, ctrl_mask; + i915_reg_t data_reg, ctrl_reg; + u32 data_mask, ctrl_mask; ret = mipi_dsi_create_packet(&packet, msg); if (ret < 0) @@ -377,10 +380,10 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder) struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); enum port port; - u32 temp; - u32 port_ctrl; if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { + u32 temp; + temp = I915_READ(VLV_CHICKEN_3); temp &= 
~PIXEL_OVERLAP_CNT_MASK | intel_dsi->pixel_overlap << @@ -389,8 +392,9 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder) } for_each_dsi_port(port, intel_dsi->ports) { - port_ctrl = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) : - MIPI_PORT_CTRL(port); + i915_reg_t port_ctrl = IS_BROXTON(dev) ? + BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); + u32 temp; temp = I915_READ(port_ctrl); @@ -416,13 +420,13 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); enum port port; - u32 temp; - u32 port_ctrl; for_each_dsi_port(port, intel_dsi->ports) { + i915_reg_t port_ctrl = IS_BROXTON(dev) ? + BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); + u32 temp; + /* de-assert ip_tg_enable signal */ - port_ctrl = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) : - MIPI_PORT_CTRL(port); temp = I915_READ(port_ctrl); I915_WRITE(port_ctrl, temp & ~DPI_ENABLE); POSTING_READ(port_ctrl); @@ -580,11 +584,13 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); enum port port; - u32 val; - u32 port_ctrl = 0; DRM_DEBUG_KMS("\n"); for_each_dsi_port(port, intel_dsi->ports) { + /* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */ + i915_reg_t port_ctrl = IS_BROXTON(dev) ? + BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A); + u32 val; I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY | ULPS_STATE_ENTER); @@ -598,12 +604,6 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) ULPS_STATE_ENTER); usleep_range(2000, 2500); - if (IS_BROXTON(dev)) - port_ctrl = BXT_MIPI_PORT_CTRL(port); - else if (IS_VALLEYVIEW(dev)) - /* Common bit for both MIPI Port A & MIPI Port C */ - port_ctrl = MIPI_PORT_CTRL(PORT_A); - /* Wait till Clock lanes are in LP-00 state for MIPI Port A * only. MIPI Port C has no similar bit for checking */ @@ -656,7 +656,6 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); struct drm_device *dev = encoder->base.dev; enum intel_display_power_domain power_domain; - u32 dpi_enabled, func, ctrl_reg; enum port port; DRM_DEBUG_KMS("\n"); @@ -667,9 +666,11 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, /* XXX: this only works for one DSI output */ for_each_dsi_port(port, intel_dsi->ports) { + i915_reg_t ctrl_reg = IS_BROXTON(dev) ? + BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); + u32 dpi_enabled, func; + func = I915_READ(MIPI_DSI_FUNC_PRG(port)); - ctrl_reg = IS_BROXTON(dev) ? 
BXT_MIPI_PORT_CTRL(port) : - MIPI_PORT_CTRL(port); dpi_enabled = I915_READ(ctrl_reg) & DPI_ENABLE; /* Due to some hardware limitations on BYT, MIPI Port C DPI diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h index e6cb25239941..02551ff228c2 100644 --- a/drivers/gpu/drm/i915/intel_dsi.h +++ b/drivers/gpu/drm/i915/intel_dsi.h @@ -117,7 +117,7 @@ static inline struct intel_dsi_host *to_intel_dsi_host(struct mipi_dsi_host *h) #define for_each_dsi_port(__port, __ports_mask) \ for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \ - if ((__ports_mask) & (1 << (__port))) + for_each_if ((__ports_mask) & (1 << (__port))) static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder) { diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 8492053e0ff0..7161deb2aed8 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -44,6 +44,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = { .type = INTEL_DVO_CHIP_TMDS, .name = "sil164", .dvo_reg = DVOC, + .dvo_srcdim_reg = DVOC_SRCDIM, .slave_addr = SIL164_ADDR, .dev_ops = &sil164_ops, }, @@ -51,6 +52,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = { .type = INTEL_DVO_CHIP_TMDS, .name = "ch7xxx", .dvo_reg = DVOC, + .dvo_srcdim_reg = DVOC_SRCDIM, .slave_addr = CH7xxx_ADDR, .dev_ops = &ch7xxx_ops, }, @@ -58,6 +60,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = { .type = INTEL_DVO_CHIP_TMDS, .name = "ch7xxx", .dvo_reg = DVOC, + .dvo_srcdim_reg = DVOC_SRCDIM, .slave_addr = 0x75, /* For some ch7010 */ .dev_ops = &ch7xxx_ops, }, @@ -65,6 +68,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = { .type = INTEL_DVO_CHIP_LVDS, .name = "ivch", .dvo_reg = DVOA, + .dvo_srcdim_reg = DVOA_SRCDIM, .slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */ .dev_ops = &ivch_ops, }, @@ -72,6 +76,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = { .type = INTEL_DVO_CHIP_TMDS, .name = "tfp410", .dvo_reg = DVOC, + .dvo_srcdim_reg = DVOC_SRCDIM, .slave_addr = TFP410_ADDR, .dev_ops = &tfp410_ops, }, @@ -79,6 +84,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = { .type = INTEL_DVO_CHIP_LVDS, .name = "ch7017", .dvo_reg = DVOC, + .dvo_srcdim_reg = DVOC_SRCDIM, .slave_addr = 0x75, .gpio = GMBUS_PIN_DPB, .dev_ops = &ch7017_ops, @@ -87,6 +93,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = { .type = INTEL_DVO_CHIP_TMDS, .name = "ns2501", .dvo_reg = DVOB, + .dvo_srcdim_reg = DVOB_SRCDIM, .slave_addr = NS2501_ADDR, .dev_ops = &ns2501_ops, } @@ -171,7 +178,7 @@ static void intel_disable_dvo(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; struct intel_dvo *intel_dvo = enc_to_dvo(encoder); - u32 dvo_reg = intel_dvo->dev.dvo_reg; + i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg; u32 temp = I915_READ(dvo_reg); intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false); @@ -184,7 +191,7 @@ static void intel_enable_dvo(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; struct intel_dvo *intel_dvo = enc_to_dvo(encoder); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); - u32 dvo_reg = intel_dvo->dev.dvo_reg; + i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg; u32 temp = I915_READ(dvo_reg); intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, @@ -255,20 +262,8 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder) struct intel_dvo *intel_dvo = enc_to_dvo(encoder); int pipe = 
crtc->pipe; u32 dvo_val; - u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg; - - switch (dvo_reg) { - case DVOA: - default: - dvo_srcdim_reg = DVOA_SRCDIM; - break; - case DVOB: - dvo_srcdim_reg = DVOB_SRCDIM; - break; - case DVOC: - dvo_srcdim_reg = DVOC_SRCDIM; - break; - } + i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg; + i915_reg_t dvo_srcdim_reg = intel_dvo->dev.dvo_srcdim_reg; /* Save the data order, since I don't know what it should be set to. */ dvo_val = I915_READ(dvo_reg) & diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index cf47352b7b8e..11fc5281e8ef 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -46,6 +46,11 @@ static inline bool fbc_supported(struct drm_i915_private *dev_priv) return dev_priv->fbc.enable_fbc != NULL; } +static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv) +{ + return IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8; +} + /* * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's @@ -182,7 +187,8 @@ static bool g4x_fbc_enabled(struct drm_i915_private *dev_priv) return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; } -static void intel_fbc_nuke(struct drm_i915_private *dev_priv) +/* This function forces a CFB recompression through the nuke operation. */ +static void intel_fbc_recompress(struct drm_i915_private *dev_priv) { I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE); POSTING_READ(MSG_FBC_REND_STATE); @@ -231,7 +237,7 @@ static void ilk_fbc_enable(struct intel_crtc *crtc) I915_WRITE(DPFC_CPU_FENCE_OFFSET, y_offset); } - intel_fbc_nuke(dev_priv); + intel_fbc_recompress(dev_priv); DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane)); } @@ -310,7 +316,7 @@ static void gen7_fbc_enable(struct intel_crtc *crtc) SNB_CPU_FENCE_ENABLE | obj->fence_reg); I915_WRITE(DPFC_CPU_FENCE_OFFSET, get_crtc_fence_y_offset(crtc)); - intel_fbc_nuke(dev_priv); + intel_fbc_recompress(dev_priv); DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane)); } @@ -370,8 +376,6 @@ static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv) if (dev_priv->fbc.fbc_work == NULL) return; - DRM_DEBUG_KMS("cancelling pending FBC enable\n"); - /* Synchronisation is provided by struct_mutex and checking of * dev_priv->fbc.fbc_work, so we can perform the cancellation * entirely asynchronously. 
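A recurring theme in the hunks above and below is the conversion of raw u32 register offsets (hdmi_reg, output_reg, dvo_reg, port_ctrl and friends) to the typed i915_reg_t handle. As a minimal sketch of why this helps, the helpers this series adds to i915_reg.h look essentially like the following (simplified; consult the patched tree for the exact definitions):

	typedef struct {
		uint32_t reg;
	} i915_reg_t;

	/* Wrap a raw MMIO offset in the opaque handle. */
	#define _MMIO(r) ((const i915_reg_t){ .reg = (r) })

	/* Sentinel for "no such register on this platform". */
	#define INVALID_MMIO_REG _MMIO(0)

	static inline uint32_t i915_mmio_reg_offset(i915_reg_t reg)
	{
		return reg.reg;
	}

	static inline bool i915_mmio_reg_equal(i915_reg_t a, i915_reg_t b)
	{
		return i915_mmio_reg_offset(a) == i915_mmio_reg_offset(b);
	}

	static inline bool i915_mmio_reg_valid(i915_reg_t reg)
	{
		return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG);
	}

Because a one-element struct cannot be mixed with plain integers, passing a bare offset where an i915_reg_t is expected (or doing arithmetic directly on a register) now fails to compile, which is what drives the many mechanical signature changes throughout this merge.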
@@ -432,7 +436,8 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv) intel_fbc_cancel_work(dev_priv); - dev_priv->fbc.disable_fbc(dev_priv); + if (dev_priv->fbc.enabled) + dev_priv->fbc.disable_fbc(dev_priv); dev_priv->fbc.crtc = NULL; } @@ -471,78 +476,45 @@ void intel_fbc_disable_crtc(struct intel_crtc *crtc) mutex_unlock(&dev_priv->fbc.lock); } -const char *intel_no_fbc_reason_str(enum no_fbc_reason reason) -{ - switch (reason) { - case FBC_OK: - return "FBC enabled but currently disabled in hardware"; - case FBC_UNSUPPORTED: - return "unsupported by this chipset"; - case FBC_NO_OUTPUT: - return "no output"; - case FBC_STOLEN_TOO_SMALL: - return "not enough stolen memory"; - case FBC_UNSUPPORTED_MODE: - return "mode incompatible with compression"; - case FBC_MODE_TOO_LARGE: - return "mode too large for compression"; - case FBC_BAD_PLANE: - return "FBC unsupported on plane"; - case FBC_NOT_TILED: - return "framebuffer not tiled or fenced"; - case FBC_MULTIPLE_PIPES: - return "more than one pipe active"; - case FBC_MODULE_PARAM: - return "disabled per module param"; - case FBC_CHIP_DEFAULT: - return "disabled per chip default"; - case FBC_ROTATION: - return "rotation unsupported"; - case FBC_IN_DBG_MASTER: - return "Kernel debugger is active"; - case FBC_BAD_STRIDE: - return "framebuffer stride not supported"; - case FBC_PIXEL_RATE: - return "pixel rate is too big"; - case FBC_PIXEL_FORMAT: - return "pixel format is invalid"; - default: - MISSING_CASE(reason); - return "unknown reason"; - } -} - static void set_no_fbc_reason(struct drm_i915_private *dev_priv, - enum no_fbc_reason reason) + const char *reason) { if (dev_priv->fbc.no_fbc_reason == reason) return; dev_priv->fbc.no_fbc_reason = reason; - DRM_DEBUG_KMS("Disabling FBC: %s\n", intel_no_fbc_reason_str(reason)); + DRM_DEBUG_KMS("Disabling FBC: %s\n", reason); +} + +static bool crtc_is_valid(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + + if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) + return false; + + if (!intel_crtc_active(&crtc->base)) + return false; + + if (!to_intel_plane_state(crtc->base.primary->state)->visible) + return false; + + return true; } static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv) { struct drm_crtc *crtc = NULL, *tmp_crtc; enum pipe pipe; - bool pipe_a_only = false; - - if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8) - pipe_a_only = true; for_each_pipe(dev_priv, pipe) { tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - if (intel_crtc_active(tmp_crtc) && - to_intel_plane_state(tmp_crtc->primary->state)->visible) + if (crtc_is_valid(to_intel_crtc(tmp_crtc))) crtc = tmp_crtc; - - if (pipe_a_only) - break; } - if (!crtc || crtc->primary->fb == NULL) + if (!crtc) return NULL; return crtc; @@ -581,7 +553,8 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv, * reserved range size, so it always assumes the maximum (8mb) is used. * If we enable FBC using a CFB on that memory range we'll get FIFO * underruns, even if that range is not reserved by the BIOS. 
*/ - if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv)) + if (IS_BROADWELL(dev_priv) || + IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) end = dev_priv->gtt.stolen_size - 8 * 1024 * 1024; else end = dev_priv->gtt.stolen_usable_size; @@ -734,6 +707,7 @@ static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc) if (INTEL_INFO(dev_priv)->gen >= 7) lines = min(lines, 2048); + /* Hardware needs the full buffer stride, not just the active area. */ return lines * fb->pitches[0]; } @@ -832,84 +806,62 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) * __intel_fbc_update - enable/disable FBC as needed, unlocked * @dev_priv: i915 device instance * - * Set up the framebuffer compression hardware at mode set time. We - * enable it if possible: - * - plane A only (on pre-965) - * - no pixel mulitply/line duplication - * - no alpha buffer discard - * - no dual wide - * - framebuffer <= max_hdisplay in width, max_vdisplay in height - * - * We can't assume that any compression will take place (worst case), - * so the compressed buffer has to be the same size as the uncompressed - * one. It also must reside (along with the line length buffer) in - * stolen memory. - * - * We need to enable/disable FBC on a global basis. + * This function completely reevaluates the status of FBC, then enables, + * disables or maintains it on the same state. */ static void __intel_fbc_update(struct drm_i915_private *dev_priv) { - struct drm_crtc *crtc = NULL; - struct intel_crtc *intel_crtc; + struct drm_crtc *drm_crtc = NULL; + struct intel_crtc *crtc; struct drm_framebuffer *fb; struct drm_i915_gem_object *obj; const struct drm_display_mode *adjusted_mode; WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock)); - /* disable framebuffer compression in vGPU */ if (intel_vgpu_active(dev_priv->dev)) i915.enable_fbc = 0; if (i915.enable_fbc < 0) { - set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT); + set_no_fbc_reason(dev_priv, "disabled per chip default"); goto out_disable; } if (!i915.enable_fbc) { - set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM); + set_no_fbc_reason(dev_priv, "disabled per module param"); goto out_disable; } - /* - * If FBC is already on, we just have to verify that we can - * keep it that way... - * Need to disable if: - * - more than one pipe is active - * - changing FBC params (stride, fence, mode) - * - new fb is too large to fit in compressed buffer - * - going to an unsupported config (interlace, pixel multiply, etc.) 
- */ - crtc = intel_fbc_find_crtc(dev_priv); - if (!crtc) { - set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT); + drm_crtc = intel_fbc_find_crtc(dev_priv); + if (!drm_crtc) { + set_no_fbc_reason(dev_priv, "no output"); goto out_disable; } if (!multiple_pipes_ok(dev_priv)) { - set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES); + set_no_fbc_reason(dev_priv, "more than one pipe active"); goto out_disable; } - intel_crtc = to_intel_crtc(crtc); - fb = crtc->primary->fb; + crtc = to_intel_crtc(drm_crtc); + fb = crtc->base.primary->fb; obj = intel_fb_obj(fb); - adjusted_mode = &intel_crtc->config->base.adjusted_mode; + adjusted_mode = &crtc->config->base.adjusted_mode; if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) || (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) { - set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE); + set_no_fbc_reason(dev_priv, "incompatible mode"); goto out_disable; } - if (!intel_fbc_hw_tracking_covers_screen(intel_crtc)) { - set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE); + if (!intel_fbc_hw_tracking_covers_screen(crtc)) { + set_no_fbc_reason(dev_priv, "mode too large for compression"); goto out_disable; } if ((INTEL_INFO(dev_priv)->gen < 4 || HAS_DDI(dev_priv)) && - intel_crtc->plane != PLANE_A) { - set_no_fbc_reason(dev_priv, FBC_BAD_PLANE); + crtc->plane != PLANE_A) { + set_no_fbc_reason(dev_priv, "FBC unsupported on plane"); goto out_disable; } @@ -918,41 +870,35 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv) */ if (obj->tiling_mode != I915_TILING_X || obj->fence_reg == I915_FENCE_REG_NONE) { - set_no_fbc_reason(dev_priv, FBC_NOT_TILED); + set_no_fbc_reason(dev_priv, "framebuffer not tiled or fenced"); goto out_disable; } if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) && - crtc->primary->state->rotation != BIT(DRM_ROTATE_0)) { - set_no_fbc_reason(dev_priv, FBC_ROTATION); + crtc->base.primary->state->rotation != BIT(DRM_ROTATE_0)) { + set_no_fbc_reason(dev_priv, "rotation unsupported"); goto out_disable; } if (!stride_is_valid(dev_priv, fb->pitches[0])) { - set_no_fbc_reason(dev_priv, FBC_BAD_STRIDE); + set_no_fbc_reason(dev_priv, "framebuffer stride not supported"); goto out_disable; } if (!pixel_format_is_valid(fb)) { - set_no_fbc_reason(dev_priv, FBC_PIXEL_FORMAT); - goto out_disable; - } - - /* If the kernel debugger is active, always disable compression */ - if (in_dbg_master()) { - set_no_fbc_reason(dev_priv, FBC_IN_DBG_MASTER); + set_no_fbc_reason(dev_priv, "pixel format is invalid"); goto out_disable; } /* WaFbcExceedCdClockThreshold:hsw,bdw */ if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) && - ilk_pipe_pixel_rate(intel_crtc->config) >= + ilk_pipe_pixel_rate(crtc->config) >= dev_priv->cdclk_freq * 95 / 100) { - set_no_fbc_reason(dev_priv, FBC_PIXEL_RATE); + set_no_fbc_reason(dev_priv, "pixel rate is too big"); goto out_disable; } - if (intel_fbc_setup_cfb(intel_crtc)) { - set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL); + if (intel_fbc_setup_cfb(crtc)) { + set_no_fbc_reason(dev_priv, "not enough stolen memory"); goto out_disable; } @@ -961,9 +907,9 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv) * cannot be unpinned (and have its GTT offset and fence revoked) * without first being decoupled from the scanout and FBC disabled. 
*/ - if (dev_priv->fbc.crtc == intel_crtc && + if (dev_priv->fbc.crtc == crtc && dev_priv->fbc.fb_id == fb->base.id && - dev_priv->fbc.y == crtc->y) + dev_priv->fbc.y == crtc->base.y) return; if (intel_fbc_enabled(dev_priv)) { @@ -994,8 +940,8 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv) __intel_fbc_disable(dev_priv); } - intel_fbc_schedule_enable(intel_crtc); - dev_priv->fbc.no_fbc_reason = FBC_OK; + intel_fbc_schedule_enable(crtc); + dev_priv->fbc.no_fbc_reason = "FBC enabled (not necessarily active)"; return; out_disable: @@ -1085,10 +1031,10 @@ void intel_fbc_init(struct drm_i915_private *dev_priv) enum pipe pipe; mutex_init(&dev_priv->fbc.lock); + dev_priv->fbc.enabled = false; if (!HAS_FBC(dev_priv)) { - dev_priv->fbc.enabled = false; - dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED; + dev_priv->fbc.no_fbc_reason = "unsupported by this chipset"; return; } @@ -1096,7 +1042,7 @@ void intel_fbc_init(struct drm_i915_private *dev_priv) dev_priv->fbc.possible_framebuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(pipe); - if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8) + if (fbc_on_pipe_a_only(dev_priv)) break; } @@ -1121,5 +1067,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv) I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT); } - dev_priv->fbc.enabled = dev_priv->fbc.fbc_enabled(dev_priv); + /* We still don't have any sort of hardware state readout for FBC, so + * disable it in case the BIOS enabled it to make sure software matches + * the hardware state. */ + if (dev_priv->fbc.fbc_enabled(dev_priv)) + dev_priv->fbc.disable_fbc(dev_priv); } diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 4fd5fdfef6bd..7ccde58f8c98 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -119,7 +119,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper, { struct intel_fbdev *ifbdev = container_of(helper, struct intel_fbdev, helper); - struct drm_framebuffer *fb; + struct drm_framebuffer *fb = NULL; struct drm_device *dev = helper->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_mode_fb_cmd2 mode_cmd = {}; @@ -138,6 +138,8 @@ static int intelfb_alloc(struct drm_fb_helper *helper, mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); + mutex_lock(&dev->struct_mutex); + size = mode_cmd.pitches[0] * mode_cmd.height; size = PAGE_ALIGN(size); @@ -156,26 +158,28 @@ static int intelfb_alloc(struct drm_fb_helper *helper, fb = __intel_framebuffer_create(dev, &mode_cmd, obj); if (IS_ERR(fb)) { + drm_gem_object_unreference(&obj->base); ret = PTR_ERR(fb); - goto out_unref; + goto out; } /* Flush everything out, we'll be doing GTT only from now on */ - ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL, NULL); + ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL); if (ret) { DRM_ERROR("failed to pin obj: %d\n", ret); - goto out_fb; + goto out; } + mutex_unlock(&dev->struct_mutex); + ifbdev->fb = to_intel_framebuffer(fb); return 0; -out_fb: - drm_framebuffer_remove(fb); -out_unref: - drm_gem_object_unreference(&obj->base); out: + mutex_unlock(&dev->struct_mutex); + if (!IS_ERR_OR_NULL(fb)) + drm_framebuffer_unreference(fb); return ret; } @@ -193,8 +197,6 @@ static int intelfb_create(struct drm_fb_helper *helper, int size, ret; bool prealloc = false; - mutex_lock(&dev->struct_mutex); - if (intel_fb && (sizes->fb_width > intel_fb->base.width || sizes->fb_height > intel_fb->base.height)) { @@ -209,7 +211,7 @@ static int intelfb_create(struct 
drm_fb_helper *helper, DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n"); ret = intelfb_alloc(helper, sizes); if (ret) - goto out_unlock; + return ret; intel_fb = ifbdev->fb; } else { DRM_DEBUG_KMS("re-using BIOS fb\n"); @@ -221,8 +223,11 @@ static int intelfb_create(struct drm_fb_helper *helper, obj = intel_fb->obj; size = obj->base.size; + mutex_lock(&dev->struct_mutex); + info = drm_fb_helper_alloc_fbi(helper); if (IS_ERR(info)) { + DRM_ERROR("Failed to allocate fb_info\n"); ret = PTR_ERR(info); goto out_unpin; } @@ -249,6 +254,7 @@ static int intelfb_create(struct drm_fb_helper *helper, ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj), size); if (!info->screen_base) { + DRM_ERROR("Failed to remap framebuffer into virtual memory\n"); ret = -ENOSPC; goto out_destroy_fbi; } @@ -281,8 +287,6 @@ out_destroy_fbi: drm_fb_helper_release_fbi(helper); out_unpin: i915_gem_object_ggtt_unpin(obj); - drm_gem_object_unreference(&obj->base); -out_unlock: mutex_unlock(&dev->struct_mutex); return ret; } @@ -526,8 +530,10 @@ static void intel_fbdev_destroy(struct drm_device *dev, drm_fb_helper_fini(&ifbdev->helper); - drm_framebuffer_unregister_private(&ifbdev->fb->base); - drm_framebuffer_remove(&ifbdev->fb->base); + if (ifbdev->fb) { + drm_framebuffer_unregister_private(&ifbdev->fb->base); + drm_framebuffer_remove(&ifbdev->fb->base); + } } /* @@ -702,13 +708,20 @@ int intel_fbdev_init(struct drm_device *dev) return 0; } -void intel_fbdev_initial_config(void *data, async_cookie_t cookie) +static void intel_fbdev_initial_config(void *data, async_cookie_t cookie) { struct drm_i915_private *dev_priv = data; struct intel_fbdev *ifbdev = dev_priv->fbdev; /* Due to peculiar init order wrt to hpd handling this is separate. */ - drm_fb_helper_initial_config(&ifbdev->helper, ifbdev->preferred_bpp); + if (drm_fb_helper_initial_config(&ifbdev->helper, + ifbdev->preferred_bpp)) + intel_fbdev_fini(dev_priv->dev); +} + +void intel_fbdev_initial_config_async(struct drm_device *dev) +{ + async_schedule(intel_fbdev_initial_config, to_i915(dev)); } void intel_fbdev_fini(struct drm_device *dev) @@ -719,7 +732,8 @@ void intel_fbdev_fini(struct drm_device *dev) flush_work(&dev_priv->fbdev_suspend_work); - async_synchronize_full(); + if (!current_is_async()) + async_synchronize_full(); intel_fbdev_destroy(dev, dev_priv->fbdev); kfree(dev_priv->fbdev); dev_priv->fbdev = NULL; diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c index 54daa66c6970..7ae182d0594b 100644 --- a/drivers/gpu/drm/i915/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c @@ -84,38 +84,21 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev) return true; } -/** - * i9xx_check_fifo_underruns - check for fifo underruns - * @dev_priv: i915 device instance - * - * This function checks for fifo underruns on GMCH platforms. This needs to be - * done manually on modeset to make sure that we catch all underruns since they - * do not generate an interrupt by themselves on these platforms. 
- */ -void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv) +static void i9xx_check_fifo_underruns(struct intel_crtc *crtc) { - struct intel_crtc *crtc; - - spin_lock_irq(&dev_priv->irq_lock); - - for_each_intel_crtc(dev_priv->dev, crtc) { - u32 reg = PIPESTAT(crtc->pipe); - u32 pipestat; - - if (crtc->cpu_fifo_underrun_disabled) - continue; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + i915_reg_t reg = PIPESTAT(crtc->pipe); + u32 pipestat = I915_READ(reg) & 0xffff0000; - pipestat = I915_READ(reg) & 0xffff0000; - if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0) - continue; + assert_spin_locked(&dev_priv->irq_lock); - I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS); - POSTING_READ(reg); + if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0) + return; - DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe)); - } + I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS); + POSTING_READ(reg); - spin_unlock_irq(&dev_priv->irq_lock); + DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe)); } static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev, @@ -123,7 +106,7 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev, bool enable, bool old) { struct drm_i915_private *dev_priv = dev->dev_private; - u32 reg = PIPESTAT(pipe); + i915_reg_t reg = PIPESTAT(pipe); u32 pipestat = I915_READ(reg) & 0xffff0000; assert_spin_locked(&dev_priv->irq_lock); @@ -150,6 +133,23 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, ironlake_disable_display_irq(dev_priv, bit); } +static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + uint32_t err_int = I915_READ(GEN7_ERR_INT); + + assert_spin_locked(&dev_priv->irq_lock); + + if ((err_int & ERR_INT_FIFO_UNDERRUN(pipe)) == 0) + return; + + I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe)); + POSTING_READ(GEN7_ERR_INT); + + DRM_ERROR("fifo underrun on pipe %c\n", pipe_name(pipe)); +} + static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable, bool old) @@ -202,6 +202,24 @@ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, ibx_disable_display_interrupt(dev_priv, bit); } +static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum transcoder pch_transcoder = (enum transcoder) crtc->pipe; + uint32_t serr_int = I915_READ(SERR_INT); + + assert_spin_locked(&dev_priv->irq_lock); + + if ((serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) == 0) + return; + + I915_WRITE(SERR_INT, SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)); + POSTING_READ(SERR_INT); + + DRM_ERROR("pch fifo underrun on pch transcoder %c\n", + transcoder_name(pch_transcoder)); +} + static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, enum transcoder pch_transcoder, bool enable, bool old) @@ -375,3 +393,56 @@ void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, DRM_ERROR("PCH transcoder %c FIFO underrun\n", transcoder_name(pch_transcoder)); } + +/** + * intel_check_cpu_fifo_underruns - check for CPU fifo underruns immediately + * @dev_priv: i915 device instance + * + * Check for CPU fifo underruns immediately. Useful on IVB/HSW where the shared + * error interrupt may have been disabled, and so CPU fifo underruns won't + * necessarily raise an interrupt, and on GMCH platforms where underruns never + * raise an interrupt. 
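+ *
+ * A minimal usage sketch (hypothetical caller; in this series the real call
+ * sites live in the modeset paths):
+ *
+ *	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+ *	/ ... enable and program the pipe ... /
+ *	intel_check_cpu_fifo_underruns(dev_priv);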
+ */ +void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv) +{ + struct intel_crtc *crtc; + + spin_lock_irq(&dev_priv->irq_lock); + + for_each_intel_crtc(dev_priv->dev, crtc) { + if (crtc->cpu_fifo_underrun_disabled) + continue; + + if (HAS_GMCH_DISPLAY(dev_priv)) + i9xx_check_fifo_underruns(crtc); + else if (IS_GEN7(dev_priv)) + ivybridge_check_fifo_underruns(crtc); + } + + spin_unlock_irq(&dev_priv->irq_lock); +} + +/** + * intel_check_pch_fifo_underruns - check for PCH fifo underruns immediately + * @dev_priv: i915 device instance + * + * Check for PCH fifo underruns immediately. Useful on CPT/PPT where the shared + * error interrupt may have been disabled, and so PCH fifo underruns won't + * necessarily raise an interrupt. + */ +void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv) +{ + struct intel_crtc *crtc; + + spin_lock_irq(&dev_priv->irq_lock); + + for_each_intel_crtc(dev_priv->dev, crtc) { + if (crtc->pch_fifo_underrun_disabled) + continue; + + if (HAS_PCH_CPT(dev_priv)) + cpt_check_pch_fifo_underruns(crtc); + } + + spin_unlock_irq(&dev_priv->irq_lock); +} diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index 081d5f648d26..5ba586683c87 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -76,11 +76,17 @@ struct intel_guc_fw { uint16_t guc_fw_minor_wanted; uint16_t guc_fw_major_found; uint16_t guc_fw_minor_found; + + uint32_t header_size; + uint32_t header_offset; + uint32_t rsa_size; + uint32_t rsa_offset; + uint32_t ucode_size; + uint32_t ucode_offset; }; struct intel_guc { struct intel_guc_fw guc_fw; - uint32_t log_flags; struct drm_i915_gem_object *log_obj; diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h index 593d2f585978..40b2ea572e16 100644 --- a/drivers/gpu/drm/i915/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/intel_guc_fwif.h @@ -122,6 +122,78 @@ #define GUC_CTL_MAX_DWORDS (GUC_CTL_RSRVD + 1) +/** + * DOC: GuC Firmware Layout + * + * The GuC firmware layout looks like this: + * + * +-------------------------------+ + * | guc_css_header | + * | contains major/minor version | + * +-------------------------------+ + * | uCode | + * +-------------------------------+ + * | RSA signature | + * +-------------------------------+ + * | modulus key | + * +-------------------------------+ + * | exponent val | + * +-------------------------------+ + * + * The firmware may or may not have modulus key and exponent data. The header, + * uCode and RSA signature are must-have components that will be used by the + * driver. The length of each component, in dwords, can be found in the header. + * If the modulus and exponent are not present in the fw (a.k.a. a truncated + * image), their length values still appear in the header. + * + * The driver does some basic fw size validation based on the following rules: + * + * 1. Header, uCode and RSA are must-have components. + * 2. All firmware components, if present, are in the sequence illustrated + * in the layout table above. + * 3. The length of each component can be found in the header, in dwords. + * 4. Modulus and exponent key are not required by the driver. They may be + * absent from the fw, in which case the driver will load a truncated + * firmware image. + */ + +struct guc_css_header { + uint32_t module_type; + /* header_size includes all non-uCode bits, including css_header, rsa + * key, modulus key and exponent data.
*/ + uint32_t header_size_dw; + uint32_t header_version; + uint32_t module_id; + uint32_t module_vendor; + union { + struct { + uint8_t day; + uint8_t month; + uint16_t year; + }; + uint32_t date; + }; + uint32_t size_dw; /* uCode plus header_size_dw */ + uint32_t key_size_dw; + uint32_t modulus_size_dw; + uint32_t exponent_size_dw; + union { + struct { + uint8_t hour; + uint8_t min; + uint16_t sec; + }; + uint32_t time; + }; + + char username[8]; + char buildnumber[12]; + uint32_t device_id; + uint32_t guc_sw_version; + uint32_t prod_preprod_fw; + uint32_t reserved[12]; + uint32_t header_info; +} __packed; + struct guc_doorbell_info { u32 db_status; u32 cookie; diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c index 3541f76c65a7..550921f2ef7d 100644 --- a/drivers/gpu/drm/i915/intel_guc_loader.c +++ b/drivers/gpu/drm/i915/intel_guc_loader.c @@ -31,7 +31,7 @@ #include "intel_guc.h" /** - * DOC: GuC + * DOC: GuC-specific firmware loader * * intel_guc: * Top level structure of guc. It handles firmware loading and manages client @@ -208,16 +208,6 @@ static inline bool guc_ucode_response(struct drm_i915_private *dev_priv, /* * Transfer the firmware image to RAM for execution by the microcontroller. * - * GuC Firmware layout: - * +-------------------------------+ ---- - * | CSS header | 128B - * | contains major/minor version | - * +-------------------------------+ ---- - * | uCode | - * +-------------------------------+ ---- - * | RSA signature | 256B - * +-------------------------------+ ---- - * * Architecturally, the DMA engine is bidirectional, and can potentially even * transfer between GTT locations. This functionality is left out of the API * for now as there is no need for it. @@ -225,33 +215,29 @@ static inline bool guc_ucode_response(struct drm_i915_private *dev_priv, * Note that GuC needs the CSS header plus uKernel code to be copied by the * DMA engine in one operation, whereas the RSA signature is loaded via MMIO. 
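 *
 * For reference, the offset math that guc_fw_fetch() derives from the CSS
 * header fields further down in this patch (all *_dw values are dword counts):
 *
 *	header_offset = 0
 *	header_size   = (header_size_dw - modulus_size_dw -
 *			 key_size_dw - exponent_size_dw) * sizeof(u32)
 *	ucode_offset  = header_offset + header_size
 *	ucode_size    = (size_dw - header_size_dw) * sizeof(u32)
 *	rsa_offset    = ucode_offset + ucode_size
 *	rsa_size      = key_size_dw * sizeof(u32)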
*/ - -#define UOS_CSS_HEADER_OFFSET 0 -#define UOS_VER_MINOR_OFFSET 0x44 -#define UOS_VER_MAJOR_OFFSET 0x46 -#define UOS_CSS_HEADER_SIZE 0x80 -#define UOS_RSA_SIG_SIZE 0x100 - static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv) { struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj; unsigned long offset; struct sg_table *sg = fw_obj->pages; - u32 status, ucode_size, rsa[UOS_RSA_SIG_SIZE / sizeof(u32)]; + u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT]; int i, ret = 0; - /* uCode size, also is where RSA signature starts */ - offset = ucode_size = guc_fw->guc_fw_size - UOS_RSA_SIG_SIZE; - I915_WRITE(DMA_COPY_SIZE, ucode_size); + /* where RSA signature starts */ + offset = guc_fw->rsa_offset; /* Copy RSA signature from the fw image to HW for verification */ - sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, UOS_RSA_SIG_SIZE, offset); - for (i = 0; i < UOS_RSA_SIG_SIZE / sizeof(u32); i++) + sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa), offset); + for (i = 0; i < UOS_RSA_SCRATCH_MAX_COUNT; i++) I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]); + /* The header plus uCode will be copied to WOPCM via DMA, excluding any + * other components */ + I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size); + /* Set the source address for the new blob */ - offset = i915_gem_obj_ggtt_offset(fw_obj); + offset = i915_gem_obj_ggtt_offset(fw_obj) + guc_fw->header_offset; I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset)); I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF); @@ -322,8 +308,8 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv) I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE); /* WaDisableMinuteIaClockGating:skl,bxt */ - if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) || - (IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) { + if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || + IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) & ~GUC_ENABLE_MIA_CLOCK_GATING)); } @@ -378,6 +364,9 @@ int intel_guc_ucode_load(struct drm_device *dev) struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; int err = 0; + if (!i915.enable_guc_submission) + return 0; + DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n", intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status), intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); @@ -457,10 +446,8 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw) { struct drm_i915_gem_object *obj; const struct firmware *fw; - const u8 *css_header; - const size_t minsize = UOS_CSS_HEADER_SIZE + UOS_RSA_SIG_SIZE; - const size_t maxsize = GUC_WOPCM_SIZE_VALUE + UOS_RSA_SIG_SIZE - - 0x8000; /* 32k reserved (8K stack + 24k context) */ + struct guc_css_header *css; + size_t size; int err; DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n", @@ -474,12 +461,52 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw) DRM_DEBUG_DRIVER("fetch GuC fw from %s succeeded, fw %p\n", guc_fw->guc_fw_path, fw); - DRM_DEBUG_DRIVER("firmware file size %zu (minimum %zu, maximum %zu)\n", - fw->size, minsize, maxsize); - /* Check the size of the blob befoe examining buffer contents */ - if (fw->size < minsize || fw->size > maxsize) + /* Check the size of the blob before examining buffer contents */ + if (fw->size < sizeof(struct guc_css_header)) { + DRM_ERROR("Firmware header is missing\n"); goto fail; + } + + css = (struct guc_css_header *)fw->data; + + /* Firmware bits always start from 
header */ + guc_fw->header_offset = 0; + guc_fw->header_size = (css->header_size_dw - css->modulus_size_dw - + css->key_size_dw - css->exponent_size_dw) * sizeof(u32); + + if (guc_fw->header_size != sizeof(struct guc_css_header)) { + DRM_ERROR("CSS header definition mismatch\n"); + goto fail; + } + + /* then, uCode */ + guc_fw->ucode_offset = guc_fw->header_offset + guc_fw->header_size; + guc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32); + + /* now RSA */ + if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) { + DRM_ERROR("RSA key size is bad\n"); + goto fail; + } + guc_fw->rsa_offset = guc_fw->ucode_offset + guc_fw->ucode_size; + guc_fw->rsa_size = css->key_size_dw * sizeof(u32); + + /* At least, it should have header, uCode and RSA. Size of all three. */ + size = guc_fw->header_size + guc_fw->ucode_size + guc_fw->rsa_size; + if (fw->size < size) { + DRM_ERROR("Missing firmware components\n"); + goto fail; + } + + /* Header and uCode will be loaded to WOPCM. Size of the two. */ + size = guc_fw->header_size + guc_fw->ucode_size; + + /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */ + if (size > GUC_WOPCM_SIZE_VALUE - 0x8000) { + DRM_ERROR("Firmware is too large to fit in WOPCM\n"); + goto fail; + } /* * The GuC firmware image has the version number embedded at a well-known @@ -487,9 +514,8 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw) * TWO bytes each (i.e. u16), although all pointers and offsets are defined * in terms of bytes (u8). */ - css_header = fw->data + UOS_CSS_HEADER_OFFSET; - guc_fw->guc_fw_major_found = *(u16 *)(css_header + UOS_VER_MAJOR_OFFSET); - guc_fw->guc_fw_minor_found = *(u16 *)(css_header + UOS_VER_MINOR_OFFSET); + guc_fw->guc_fw_major_found = css->guc_sw_version >> 16; + guc_fw->guc_fw_minor_found = css->guc_sw_version & 0xFFFF; if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted || guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) { @@ -566,6 +592,9 @@ void intel_guc_ucode_init(struct drm_device *dev) fw_path = ""; /* unknown device */ } + if (!i915.enable_guc_submission) + return; + guc_fw->guc_dev = dev; guc_fw->guc_fw_path = fw_path; guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE; diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 9eafa191cee2..c046017be786 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -113,10 +113,11 @@ static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type) } } -static u32 hsw_dip_data_reg(struct drm_i915_private *dev_priv, - enum transcoder cpu_transcoder, - enum hdmi_infoframe_type type, - int i) +static i915_reg_t +hsw_dip_data_reg(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder, + enum hdmi_infoframe_type type, + int i) { switch (type) { case HDMI_INFOFRAME_TYPE_AVI: @@ -127,7 +128,7 @@ static u32 hsw_dip_data_reg(struct drm_i915_private *dev_priv, return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder, i); default: DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); - return 0; + return INVALID_MMIO_REG; } } @@ -193,8 +194,9 @@ static void ibx_write_infoframe(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe); + i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); + int i; WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); @@ 
-229,7 +231,7 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); - int reg = TVIDEO_DIP_CTL(intel_crtc->pipe); + i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); if ((val & VIDEO_DIP_ENABLE) == 0) @@ -251,8 +253,9 @@ static void cpt_write_infoframe(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe); + i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); + int i; WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); @@ -289,8 +292,7 @@ static bool cpt_infoframe_enabled(struct drm_encoder *encoder) struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - int reg = TVIDEO_DIP_CTL(intel_crtc->pipe); - u32 val = I915_READ(reg); + u32 val = I915_READ(TVIDEO_DIP_CTL(intel_crtc->pipe)); if ((val & VIDEO_DIP_ENABLE) == 0) return false; @@ -308,8 +310,9 @@ static void vlv_write_infoframe(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - int i, reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); + i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); + int i; WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); @@ -344,8 +347,7 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); - int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); - u32 val = I915_READ(reg); + u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(intel_crtc->pipe)); if ((val & VIDEO_DIP_ENABLE) == 0) return false; @@ -367,13 +369,13 @@ static void hsw_write_infoframe(struct drm_encoder *encoder, struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; - u32 ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); - u32 data_reg; + i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); + i915_reg_t data_reg; int i; u32 val = I915_READ(ctl_reg); data_reg = hsw_dip_data_reg(dev_priv, cpu_transcoder, type, 0); - if (data_reg == 0) + if (!i915_mmio_reg_valid(data_reg)) return; val &= ~hsw_infoframe_enable(type); @@ -401,8 +403,7 @@ static bool hsw_infoframe_enabled(struct drm_encoder *encoder) struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder); - u32 val = I915_READ(ctl_reg); + u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder)); return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW | @@ -513,7 +514,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder, struct drm_i915_private *dev_priv = encoder->dev->dev_private; struct intel_digital_port *intel_dig_port =
enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; - u32 reg = VIDEO_DIP_CTL; + i915_reg_t reg = VIDEO_DIP_CTL; u32 val = I915_READ(reg); u32 port = VIDEO_DIP_PORT(intel_dig_port->port); @@ -633,7 +634,8 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder) { struct drm_i915_private *dev_priv = encoder->dev->dev_private; struct intel_crtc *crtc = to_intel_crtc(encoder->crtc); - u32 reg, val = 0; + i915_reg_t reg; + u32 val = 0; if (HAS_DDI(dev_priv)) reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder); @@ -666,7 +668,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder, struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; - u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe); + i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); u32 port = VIDEO_DIP_PORT(intel_dig_port->port); @@ -717,7 +719,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder, struct drm_i915_private *dev_priv = encoder->dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); - u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe); + i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); assert_hdmi_port_disabled(intel_hdmi); @@ -760,7 +762,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder, struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); - u32 reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); + i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); u32 port = VIDEO_DIP_PORT(intel_dig_port->port); @@ -811,7 +813,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder, struct drm_i915_private *dev_priv = encoder->dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); - u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder); + i915_reg_t reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder); u32 val = I915_READ(reg); assert_hdmi_port_disabled(intel_hdmi); @@ -1108,6 +1110,13 @@ static void intel_disable_hdmi(struct intel_encoder *encoder) * matching DP port to be enabled on transcoder A. */ if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B) { + /* + * We get CPU/PCH FIFO underruns on the other pipe when + * doing the workaround. Sweep them under the rug. 
+ */ + intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false); + intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); + temp &= ~SDVO_PIPE_B_SELECT; temp |= SDVO_ENABLE; /* @@ -1122,6 +1131,10 @@ static void intel_disable_hdmi(struct intel_encoder *encoder) temp &= ~SDVO_ENABLE; I915_WRITE(intel_hdmi->hdmi_reg, temp); POSTING_READ(intel_hdmi->hdmi_reg); + + intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A); + intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); + intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); } intel_hdmi->set_infoframes(&encoder->base, false, NULL); @@ -1335,21 +1348,18 @@ intel_hdmi_set_edid(struct drm_connector *connector, bool force) { struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); - struct intel_encoder *intel_encoder = - &hdmi_to_dig_port(intel_hdmi)->base; - enum intel_display_power_domain power_domain; struct edid *edid = NULL; bool connected = false; - power_domain = intel_display_port_power_domain(intel_encoder); - intel_display_power_get(dev_priv, power_domain); + if (force) { + intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); - if (force) edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus)); - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); + } to_intel_connector(connector)->detect_edid = edid; if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) { @@ -1383,6 +1393,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); + intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); + while (!live_status && --retry) { live_status = intel_digital_port_connected(dev_priv, hdmi_to_dig_port(intel_hdmi)); @@ -1402,6 +1414,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) } else status = connector_status_disconnected; + intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); + return status; } @@ -2039,7 +2053,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, * On BXT A0/A1, sw needs to activate DDIA HPD logic and * interrupts to check the external panel connection. 
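 *
 * (The IS_BXT_REVID() conversion just below relies on the inclusive
 * revid-range helpers this series introduces in i915_drv.h; their assumed
 * shape, simplified:
 *
 *	#define IS_REVID(p, since, until) \
 *		(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
 *	#define IS_BXT_REVID(p, since, until) \
 *		(IS_BROXTON(p) && IS_REVID(p, since, until))
 *
 * so "0, BXT_REVID_A1" reads as "any revision up to and including A1".)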
*/ - if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0)) + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) intel_encoder->hpd_pin = HPD_PORT_A; else intel_encoder->hpd_pin = HPD_PORT_B; @@ -2131,8 +2145,10 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, } } -void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port) +void intel_hdmi_init(struct drm_device *dev, + i915_reg_t hdmi_reg, enum port port) { + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_digital_port *intel_dig_port; struct intel_encoder *intel_encoder; struct intel_connector *intel_connector; @@ -2201,8 +2217,9 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port) intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI; intel_dig_port->port = port; + dev_priv->dig_port_map[port] = intel_encoder; intel_dig_port->hdmi.hdmi_reg = hdmi_reg; - intel_dig_port->dp.output_reg = 0; + intel_dig_port->dp.output_reg = INVALID_MMIO_REG; intel_hdmi_init_connector(intel_dig_port, intel_connector); } diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 1369fc41d039..1110c83953cf 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -36,7 +36,7 @@ struct gmbus_pin { const char *name; - int reg; + i915_reg_t reg; }; /* Map gmbus pin pairs to names and registers. */ @@ -63,9 +63,9 @@ static const struct gmbus_pin gmbus_pins_skl[] = { }; static const struct gmbus_pin gmbus_pins_bxt[] = { - [GMBUS_PIN_1_BXT] = { "dpb", PCH_GPIOB }, - [GMBUS_PIN_2_BXT] = { "dpc", PCH_GPIOC }, - [GMBUS_PIN_3_BXT] = { "misc", PCH_GPIOD }, + [GMBUS_PIN_1_BXT] = { "dpb", GPIOB }, + [GMBUS_PIN_2_BXT] = { "dpc", GPIOC }, + [GMBUS_PIN_3_BXT] = { "misc", GPIOD }, }; /* pin is expected to be valid */ @@ -74,7 +74,7 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv, { if (IS_BROXTON(dev_priv)) return &gmbus_pins_bxt[pin]; - else if (IS_SKYLAKE(dev_priv)) + else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) return &gmbus_pins_skl[pin]; else if (IS_BROADWELL(dev_priv)) return &gmbus_pins_bdw[pin]; @@ -89,14 +89,15 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv, if (IS_BROXTON(dev_priv)) size = ARRAY_SIZE(gmbus_pins_bxt); - else if (IS_SKYLAKE(dev_priv)) + else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) size = ARRAY_SIZE(gmbus_pins_skl); else if (IS_BROADWELL(dev_priv)) size = ARRAY_SIZE(gmbus_pins_bdw); else size = ARRAY_SIZE(gmbus_pins); - return pin < size && get_gmbus_pin(dev_priv, pin)->reg; + return pin < size && + i915_mmio_reg_valid(get_gmbus_pin(dev_priv, pin)->reg); } /* Intel GPIO access functions */ @@ -240,9 +241,8 @@ intel_gpio_setup(struct intel_gmbus *bus, unsigned int pin) algo = &bus->bit_algo; - bus->gpio_reg = dev_priv->gpio_mmio_base + - get_gmbus_pin(dev_priv, pin)->reg; - + bus->gpio_reg = _MMIO(dev_priv->gpio_mmio_base + + i915_mmio_reg_offset(get_gmbus_pin(dev_priv, pin)->reg)); bus->adapter.algo_data = algo; algo->setsda = set_data; algo->setscl = set_clock; @@ -483,7 +483,7 @@ gmbus_xfer(struct i2c_adapter *adapter, int i = 0, inc, try = 0; int ret = 0; - intel_aux_display_runtime_get(dev_priv); + intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); mutex_lock(&dev_priv->gmbus_mutex); if (bus->force_bit) { @@ -595,7 +595,9 @@ timeout: out: mutex_unlock(&dev_priv->gmbus_mutex); - intel_aux_display_runtime_put(dev_priv); + + intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); + return ret; } @@ -626,12 +628,13 @@ int 
intel_setup_gmbus(struct drm_device *dev) if (HAS_PCH_NOP(dev)) return 0; - else if (HAS_PCH_SPLIT(dev)) - dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA; - else if (IS_VALLEYVIEW(dev)) + + if (IS_VALLEYVIEW(dev)) dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE; - else - dev_priv->gpio_mmio_base = 0; + else if (!HAS_GMCH_DISPLAY(dev_priv)) + dev_priv->gpio_mmio_base = + i915_mmio_reg_offset(PCH_GPIOA) - + i915_mmio_reg_offset(GPIOA); mutex_init(&dev_priv->gmbus_mutex); init_waitqueue_head(&dev_priv->gmbus_wait_queue); diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 88e12bdf79e2..4ebafab53f30 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -190,16 +190,21 @@ #define GEN8_CTX_L3LLC_COHERENT (1<<5) #define GEN8_CTX_PRIVILEGE (1<<8) -#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) { \ +#define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \ + (reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \ + (reg_state)[(pos)+1] = (val); \ +} while (0) + +#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \ const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \ reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \ reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \ -} +} while (0) -#define ASSIGN_CTX_PML4(ppgtt, reg_state) { \ +#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \ reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \ reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \ -} +} while (0) enum { ADVANCED_CONTEXT = 0, @@ -284,8 +289,8 @@ static bool disable_lite_restore_wa(struct intel_engine_cs *ring) { struct drm_device *dev = ring->dev; - return ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) || - (IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) && + return (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || + IS_BXT_REVID(dev, 0, BXT_REVID_A1)) && (ring->id == VCS || ring->id == VCS2); } @@ -921,7 +926,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params, intel_logical_ring_emit(ringbuf, MI_NOOP); intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1)); - intel_logical_ring_emit(ringbuf, INSTPM); + intel_logical_ring_emit_reg(ringbuf, INSTPM); intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode); intel_logical_ring_advance(ringbuf); @@ -1096,7 +1101,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count)); for (i = 0; i < w->count; i++) { - intel_logical_ring_emit(ringbuf, w->reg[i].addr); + intel_logical_ring_emit_reg(ringbuf, w->reg[i].addr); intel_logical_ring_emit(ringbuf, w->reg[i].value); } intel_logical_ring_emit(ringbuf, MI_NOOP); @@ -1120,6 +1125,8 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) batch[__index] = (cmd); \ } while (0) +#define wa_ctx_emit_reg(batch, index, reg) \ + wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg)) /* * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after @@ -1149,17 +1156,17 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring, * this batch updates GEN8_L3SQCREG4 with default value we need to * set this bit here to retain the WA during flush. 
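The ASSIGN_CTX_PDP and ASSIGN_CTX_PML4 conversions above, and the new ASSIGN_CTX_REG, all use the standard do { ... } while (0) idiom for multi-statement macros: the do/while form swallows the trailing semicolon, so the expansion behaves as a single statement. A standalone illustration (emit, BAD_PAIR and GOOD_PAIR are invented stand-ins):

void emit(int v);	/* stand-in for a real emitter */

#define BAD_PAIR(v)  { emit(v); emit(0); }
#define GOOD_PAIR(v) do { emit(v); emit(0); } while (0)

void example(int cond)
{
	if (cond)
		GOOD_PAIR(1);	/* one statement, ';' consumed by while (0) */
	else
		emit(-1);	/* BAD_PAIR(1); here would orphan the else */
}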
*/ - if (IS_SKYLAKE(ring->dev) && INTEL_REVID(ring->dev) <= SKL_REVID_E0) + if (IS_SKL_REVID(ring->dev, 0, SKL_REVID_E0)) l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS; wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT)); - wa_ctx_emit(batch, index, GEN8_L3SQCREG4); + wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256); wa_ctx_emit(batch, index, 0); wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); - wa_ctx_emit(batch, index, GEN8_L3SQCREG4); + wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); wa_ctx_emit(batch, index, l3sqc4_flush); wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6)); @@ -1172,7 +1179,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring, wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT)); - wa_ctx_emit(batch, index, GEN8_L3SQCREG4); + wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256); wa_ctx_emit(batch, index, 0); @@ -1314,8 +1321,8 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring, uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); /* WaDisableCtxRestoreArbitration:skl,bxt */ - if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) || - (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0))) + if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || + IS_BXT_REVID(dev, 0, BXT_REVID_A1)) wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */ @@ -1340,18 +1347,18 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *ring, uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ - if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_B0)) || - (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0))) { + if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || + IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); - wa_ctx_emit(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0); + wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0); wa_ctx_emit(batch, index, _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING)); wa_ctx_emit(batch, index, MI_NOOP); } /* WaDisableCtxRestoreArbitration:skl,bxt */ - if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) || - (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0))) + if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || + IS_BXT_REVID(dev, 0, BXT_REVID_A1)) wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE); wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END); @@ -1472,12 +1479,6 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring) I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); - if (ring->status_page.obj) { - I915_WRITE(RING_HWS_PGA(ring->mmio_base), - (u32)ring->status_page.gfx_addr); - POSTING_READ(RING_HWS_PGA(ring->mmio_base)); - } - I915_WRITE(RING_MODE_GEN7(ring), _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); @@ -1562,9 +1563,9 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) { const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); - intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_UDW(ring, i)); + intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_UDW(ring, i)); intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr)); - intel_logical_ring_emit(ringbuf, 
GEN8_RING_PDP_LDW(ring, i)); + intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_LDW(ring, i)); intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr)); } @@ -1923,6 +1924,7 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin i915_gem_batch_pool_init(dev, &ring->batch_pool); init_waitqueue_head(&ring->irq_queue); + INIT_LIST_HEAD(&ring->buffers); INIT_LIST_HEAD(&ring->execlist_queue); INIT_LIST_HEAD(&ring->execlist_retired_req_list); spin_lock_init(&ring->execlist_lock); @@ -1972,7 +1974,7 @@ static int logical_render_ring_init(struct drm_device *dev) ring->init_hw = gen8_init_render_ring; ring->init_context = gen8_init_rcs_context; ring->cleanup = intel_fini_pipe_control; - if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) { + if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { ring->get_seqno = bxt_a_get_seqno; ring->set_seqno = bxt_a_set_seqno; } else { @@ -2024,7 +2026,7 @@ static int logical_bsd_ring_init(struct drm_device *dev) GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; ring->init_hw = gen8_init_common_ring; - if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) { + if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { ring->get_seqno = bxt_a_get_seqno; ring->set_seqno = bxt_a_set_seqno; } else { @@ -2079,7 +2081,7 @@ static int logical_blt_ring_init(struct drm_device *dev) GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT; ring->init_hw = gen8_init_common_ring; - if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) { + if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { ring->get_seqno = bxt_a_get_seqno; ring->set_seqno = bxt_a_set_seqno; } else { @@ -2109,7 +2111,7 @@ static int logical_vebox_ring_init(struct drm_device *dev) GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT; ring->init_hw = gen8_init_common_ring; - if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) { + if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { ring->get_seqno = bxt_a_get_seqno; ring->set_seqno = bxt_a_set_seqno; } else { @@ -2263,46 +2265,31 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o * only for the first context restore: on a subsequent save, the GPU will * recreate this batchbuffer with new values (including all the missing * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */ - if (ring->id == RCS) - reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(14); - else - reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(11); - reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED; - reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring); - reg_state[CTX_CONTEXT_CONTROL+1] = - _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH | - CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | - CTX_CTRL_RS_CTX_ENABLE); - reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base); - reg_state[CTX_RING_HEAD+1] = 0; - reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base); - reg_state[CTX_RING_TAIL+1] = 0; - reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base); + reg_state[CTX_LRI_HEADER_0] = + MI_LOAD_REGISTER_IMM(ring->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED; + ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(ring), + _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH | + CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | + CTX_CTRL_RS_CTX_ENABLE)); + ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(ring->mmio_base), 0); + ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(ring->mmio_base), 0); /* Ring buffer start address is not known until the buffer is pinned. 
* It is written to the context image in execlists_update_context() */ - reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base); - reg_state[CTX_RING_BUFFER_CONTROL+1] = - ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID; - reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168; - reg_state[CTX_BB_HEAD_U+1] = 0; - reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140; - reg_state[CTX_BB_HEAD_L+1] = 0; - reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110; - reg_state[CTX_BB_STATE+1] = (1<<5); - reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c; - reg_state[CTX_SECOND_BB_HEAD_U+1] = 0; - reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114; - reg_state[CTX_SECOND_BB_HEAD_L+1] = 0; - reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118; - reg_state[CTX_SECOND_BB_STATE+1] = 0; + ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START, RING_START(ring->mmio_base), 0); + ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL, RING_CTL(ring->mmio_base), + ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID); + ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U, RING_BBADDR_UDW(ring->mmio_base), 0); + ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L, RING_BBADDR(ring->mmio_base), 0); + ASSIGN_CTX_REG(reg_state, CTX_BB_STATE, RING_BBSTATE(ring->mmio_base), + RING_BB_PPGTT); + ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(ring->mmio_base), 0); + ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(ring->mmio_base), 0); + ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE, RING_SBBSTATE(ring->mmio_base), 0); if (ring->id == RCS) { - reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0; - reg_state[CTX_BB_PER_CTX_PTR+1] = 0; - reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4; - reg_state[CTX_RCS_INDIRECT_CTX+1] = 0; - reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8; - reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0; + ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(ring->mmio_base), 0); + ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(ring->mmio_base), 0); + ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET, RING_INDIRECT_CTX_OFFSET(ring->mmio_base), 0); if (ring->wa_ctx.obj) { struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx; uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj); @@ -2319,18 +2306,17 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o 0x01; } } - reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9); - reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED; - reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8; - reg_state[CTX_CTX_TIMESTAMP+1] = 0; - reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3); - reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3); - reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2); - reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2); - reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1); - reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1); - reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0); - reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0); + reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED; + ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(ring->mmio_base), 0); + /* PDP values will be assigned later if needed */ + ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(ring, 3), 0); + ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(ring, 3), 0); + ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(ring, 2), 0); + ASSIGN_CTX_REG(reg_state,
CTX_PDP2_LDW, GEN8_RING_PDP_LDW(ring, 2), 0); + ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(ring, 1), 0); + ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(ring, 1), 0); + ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(ring, 0), 0); + ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(ring, 0), 0); if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) { /* 64b PPGTT (48bit canonical) @@ -2352,8 +2338,8 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o if (ring->id == RCS) { reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); - reg_state[CTX_R_PWR_CLK_STATE] = GEN8_R_PWR_CLK_STATE; - reg_state[CTX_R_PWR_CLK_STATE+1] = make_rpcs(dev); + ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, + make_rpcs(dev)); } kunmap_atomic(reg_state); diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 4e60d54ba66d..0b821b91723a 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -29,16 +29,16 @@ #define GEN8_CSB_PTR_MASK 0x07 /* Execlists regs */ -#define RING_ELSP(ring) ((ring)->mmio_base+0x230) -#define RING_EXECLIST_STATUS_LO(ring) ((ring)->mmio_base+0x234) -#define RING_EXECLIST_STATUS_HI(ring) ((ring)->mmio_base+0x234 + 4) -#define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244) +#define RING_ELSP(ring) _MMIO((ring)->mmio_base + 0x230) +#define RING_EXECLIST_STATUS_LO(ring) _MMIO((ring)->mmio_base + 0x234) +#define RING_EXECLIST_STATUS_HI(ring) _MMIO((ring)->mmio_base + 0x234 + 4) +#define RING_CONTEXT_CONTROL(ring) _MMIO((ring)->mmio_base + 0x244) #define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3) #define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0) #define CTX_CTRL_RS_CTX_ENABLE (1 << 1) -#define RING_CONTEXT_STATUS_BUF_LO(ring, i) ((ring)->mmio_base+0x370 + (i) * 8) -#define RING_CONTEXT_STATUS_BUF_HI(ring, i) ((ring)->mmio_base+0x370 + (i) * 8 + 4) -#define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0) +#define RING_CONTEXT_STATUS_BUF_LO(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8) +#define RING_CONTEXT_STATUS_BUF_HI(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4) +#define RING_CONTEXT_STATUS_PTR(ring) _MMIO((ring)->mmio_base + 0x3a0) /* Logical Rings */ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request); @@ -70,6 +70,11 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf, iowrite32(data, ringbuf->virtual_start + ringbuf->tail); ringbuf->tail += 4; } +static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf, + i915_reg_t reg) +{ + intel_logical_ring_emit(ringbuf, i915_mmio_reg_offset(reg)); +} /* Logical Ring Contexts */ diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 7f39b8ad88ae..61f1145f6579 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -51,7 +51,7 @@ struct intel_lvds_encoder { struct intel_encoder base; bool is_dual_link; - u32 reg; + i915_reg_t reg; u32 a3_power; struct intel_lvds_connector *attached_connector; @@ -210,7 +210,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder) struct intel_connector *intel_connector = &lvds_encoder->attached_connector->base; struct drm_i915_private *dev_priv = dev->dev_private; - u32 ctl_reg, stat_reg; + i915_reg_t ctl_reg, stat_reg; if (HAS_PCH_SPLIT(dev)) { ctl_reg = PCH_PP_CONTROL; @@ -235,7 +235,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder) struct drm_device *dev = 
encoder->base.dev; struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); struct drm_i915_private *dev_priv = dev->dev_private; - u32 ctl_reg, stat_reg; + i915_reg_t ctl_reg, stat_reg; if (HAS_PCH_SPLIT(dev)) { ctl_reg = PCH_PP_CONTROL; @@ -939,7 +939,7 @@ void intel_lvds_init(struct drm_device *dev) struct drm_display_mode *downclock_mode = NULL; struct edid *edid; struct drm_crtc *crtc; - u32 lvds_reg; + i915_reg_t lvds_reg; u32 lvds; int pipe; u8 pin; @@ -1164,8 +1164,7 @@ out: DRM_DEBUG_KMS("detected %s-link lvds configuration\n", lvds_encoder->is_dual_link ? "dual" : "single"); - lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) & - LVDS_A3_POWER_MASK; + lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK; lvds_connector->lid_notifier.notifier_call = intel_lid_notify; if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) { diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c index 6d3c6c0a5c62..fed7bea19cc9 100644 --- a/drivers/gpu/drm/i915/intel_mocs.c +++ b/drivers/gpu/drm/i915/intel_mocs.c @@ -143,7 +143,7 @@ static bool get_mocs_settings(struct drm_device *dev, { bool result = false; - if (IS_SKYLAKE(dev)) { + if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { table->size = ARRAY_SIZE(skylake_mocs_table); table->table = skylake_mocs_table; result = true; @@ -159,11 +159,30 @@ static bool get_mocs_settings(struct drm_device *dev, return result; } +static i915_reg_t mocs_register(enum intel_ring_id ring, int index) +{ + switch (ring) { + case RCS: + return GEN9_GFX_MOCS(index); + case VCS: + return GEN9_MFX0_MOCS(index); + case BCS: + return GEN9_BLT_MOCS(index); + case VECS: + return GEN9_VEBOX_MOCS(index); + case VCS2: + return GEN9_MFX1_MOCS(index); + default: + MISSING_CASE(ring); + return INVALID_MMIO_REG; + } +} + /** * emit_mocs_control_table() - emit the mocs control table * @req: Request to set up the MOCS table for. * @table: The values to program into the control regs. - * @reg_base: The base for the engine that needs to be programmed. + * @ring: The engine for which to emit the registers. * * This function simply emits a MI_LOAD_REGISTER_IMM command for the * given table starting at the given address. @@ -172,7 +191,7 @@ static bool get_mocs_settings(struct drm_device *dev, */ static int emit_mocs_control_table(struct drm_i915_gem_request *req, const struct drm_i915_mocs_table *table, - u32 reg_base) + enum intel_ring_id ring) { struct intel_ringbuffer *ringbuf = req->ringbuf; unsigned int index; @@ -191,7 +210,7 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES)); for (index = 0; index < table->size; index++) { - intel_logical_ring_emit(ringbuf, reg_base + index * 4); + intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index)); intel_logical_ring_emit(ringbuf, table->table[index].control_value); } @@ -205,7 +224,7 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req, * that value to all the used entries.
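mocs_register() returning INVALID_MMIO_REG for an unknown engine leans on the i915_reg_t type that this merge threads through all the register tables. The real definitions live in i915_reg.h; the following is a sketch consistent with how the helpers are used in these hunks, not a verbatim copy of the header:

typedef struct {
	u32 reg;	/* raw MMIO offset */
} i915_reg_t;

/* Wrapping the offset in a struct turns accidental arithmetic on a
 * register into a compile-time error instead of a silent bug. */
#define _MMIO(r) ((const i915_reg_t){ .reg = (r) })
#define INVALID_MMIO_REG _MMIO(0)

static inline u32 i915_mmio_reg_offset(i915_reg_t reg)
{
	return reg.reg;
}

static inline bool i915_mmio_reg_valid(i915_reg_t reg)
{
	return i915_mmio_reg_offset(reg) != 0;
}

On this reading, intel_gmbus_is_valid_pin() above could drop its open-coded ->reg test in favour of i915_mmio_reg_valid().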
*/ for (; index < GEN9_NUM_MOCS_ENTRIES; index++) { - intel_logical_ring_emit(ringbuf, reg_base + index * 4); + intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index)); intel_logical_ring_emit(ringbuf, table->table[0].control_value); } @@ -253,7 +272,7 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req, value = (table->table[count].l3cc_value & 0xffff) | ((table->table[count + 1].l3cc_value & 0xffff) << 16); - intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4); + intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i)); intel_logical_ring_emit(ringbuf, value); } @@ -270,7 +289,7 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req, * they are reserved by the hardware. */ for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) { - intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4); + intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i)); intel_logical_ring_emit(ringbuf, value); value = filler; @@ -304,26 +323,16 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req) int ret; if (get_mocs_settings(req->ring->dev, &t)) { - /* Program the control registers */ - ret = emit_mocs_control_table(req, &t, GEN9_GFX_MOCS_0); - if (ret) - return ret; - - ret = emit_mocs_control_table(req, &t, GEN9_MFX0_MOCS_0); - if (ret) - return ret; + struct drm_i915_private *dev_priv = req->i915; + struct intel_engine_cs *ring; + enum intel_ring_id ring_id; - ret = emit_mocs_control_table(req, &t, GEN9_MFX1_MOCS_0); - if (ret) - return ret; - - ret = emit_mocs_control_table(req, &t, GEN9_VEBOX_MOCS_0); - if (ret) - return ret; - - ret = emit_mocs_control_table(req, &t, GEN9_BLT_MOCS_0); - if (ret) - return ret; + /* Program the control registers */ + for_each_ring(ring, dev_priv, ring_id) { + ret = emit_mocs_control_table(req, &t, ring_id); + if (ret) + return ret; + } /* Now program the l3cc registers */ ret = emit_mocs_l3cc_table(req, &t); diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 6dc13c02c28e..e362a30776fa 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -682,7 +682,7 @@ static void intel_didl_outputs(struct drm_device *dev) } if (!acpi_video_bus) { - DRM_ERROR("No ACPI video bus found\n"); + DRM_DEBUG_KMS("No ACPI video bus found\n"); return; } diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 444542696a2c..76f1980a7541 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -749,7 +749,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, if (ret != 0) return ret; - ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL, NULL, + ret = i915_gem_object_pin_to_display_plane(new_bo, 0, &i915_ggtt_view_normal); if (ret != 0) return ret; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 071a76b9ac52..96f45d7b3e4b 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -1708,13 +1708,6 @@ static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels, return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2; } -struct skl_pipe_wm_parameters { - bool active; - uint32_t pipe_htotal; - uint32_t pixel_rate; /* in KHz */ - struct intel_plane_wm_parameters plane[I915_MAX_PLANES]; -}; - struct ilk_wm_maximums { uint16_t pri; uint16_t spr; @@ -1722,13 +1715,6 @@ struct ilk_wm_maximums { uint16_t fbc; }; -/* used in computing the new watermarks state */ -struct intel_wm_config { - 
unsigned int num_pipes_active; - bool sprites_enabled; - bool sprites_scaled; -}; - /* * For both WM_PIPE and WM_LP. * mem_value must be in 0.1us units. @@ -1979,9 +1965,11 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, const struct intel_crtc *intel_crtc, int level, struct intel_crtc_state *cstate, + struct intel_plane_state *pristate, + struct intel_plane_state *sprstate, + struct intel_plane_state *curstate, struct intel_wm_level *result) { - struct intel_plane *intel_plane; uint16_t pri_latency = dev_priv->wm.pri_latency[level]; uint16_t spr_latency = dev_priv->wm.spr_latency[level]; uint16_t cur_latency = dev_priv->wm.cur_latency[level]; @@ -1993,29 +1981,11 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, cur_latency *= 5; } - for_each_intel_plane_on_crtc(dev_priv->dev, intel_crtc, intel_plane) { - struct intel_plane_state *pstate = - to_intel_plane_state(intel_plane->base.state); - - switch (intel_plane->base.type) { - case DRM_PLANE_TYPE_PRIMARY: - result->pri_val = ilk_compute_pri_wm(cstate, pstate, - pri_latency, - level); - result->fbc_val = ilk_compute_fbc_wm(cstate, pstate, - result->pri_val); - break; - case DRM_PLANE_TYPE_OVERLAY: - result->spr_val = ilk_compute_spr_wm(cstate, pstate, - spr_latency); - break; - case DRM_PLANE_TYPE_CURSOR: - result->cur_val = ilk_compute_cur_wm(cstate, pstate, - cur_latency); - break; - } - } - + result->pri_val = ilk_compute_pri_wm(cstate, pristate, + pri_latency, level); + result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency); + result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency); + result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val); result->enable = true; } @@ -2274,34 +2244,19 @@ static void skl_setup_wm_latency(struct drm_device *dev) intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency); } -static void ilk_compute_wm_config(struct drm_device *dev, - struct intel_wm_config *config) -{ - struct intel_crtc *intel_crtc; - - /* Compute the currently _active_ config */ - for_each_intel_crtc(dev, intel_crtc) { - const struct intel_pipe_wm *wm = &intel_crtc->wm.active; - - if (!wm->pipe_enabled) - continue; - - config->sprites_enabled |= wm->sprites_enabled; - config->sprites_scaled |= wm->sprites_scaled; - config->num_pipes_active++; - } -} - /* Compute new watermarks for the pipe */ -static bool intel_compute_pipe_wm(struct intel_crtc_state *cstate, - struct intel_pipe_wm *pipe_wm) +static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc, + struct drm_atomic_state *state) { - struct drm_crtc *crtc = cstate->base.crtc; - struct drm_device *dev = crtc->dev; + struct intel_pipe_wm *pipe_wm; + struct drm_device *dev = intel_crtc->base.dev; const struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_crtc_state *cstate = NULL; struct intel_plane *intel_plane; + struct drm_plane_state *ps; + struct intel_plane_state *pristate = NULL; struct intel_plane_state *sprstate = NULL; + struct intel_plane_state *curstate = NULL; int level, max_level = ilk_wm_max_level(dev); /* LP0 watermark maximums depend on this pipe alone */ struct intel_wm_config config = { @@ -2309,11 +2264,24 @@ static bool intel_compute_pipe_wm(struct intel_crtc_state *cstate, }; struct ilk_wm_maximums max; + cstate = intel_atomic_get_crtc_state(state, intel_crtc); + if (IS_ERR(cstate)) + return PTR_ERR(cstate); + + pipe_wm = &cstate->wm.optimal.ilk; + for_each_intel_plane_on_crtc(dev, 
intel_crtc, intel_plane) { - if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY) { - sprstate = to_intel_plane_state(intel_plane->base.state); - break; - } + ps = drm_atomic_get_plane_state(state, + &intel_plane->base); + if (IS_ERR(ps)) + return PTR_ERR(ps); + + if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY) + pristate = to_intel_plane_state(ps); + else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY) + sprstate = to_intel_plane_state(ps); + else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR) + curstate = to_intel_plane_state(ps); } config.sprites_enabled = sprstate->visible; @@ -2322,7 +2290,7 @@ static bool intel_compute_pipe_wm(struct intel_crtc_state *cstate, drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16); pipe_wm->pipe_enabled = cstate->base.active; - pipe_wm->sprites_enabled = sprstate->visible; + pipe_wm->sprites_enabled = config.sprites_enabled; pipe_wm->sprites_scaled = config.sprites_scaled; /* ILK/SNB: LP2+ watermarks only w/o sprites */ @@ -2333,24 +2301,27 @@ static bool intel_compute_pipe_wm(struct intel_crtc_state *cstate, if (config.sprites_scaled) max_level = 0; - ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate, &pipe_wm->wm[0]); + ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate, + pristate, sprstate, curstate, &pipe_wm->wm[0]); if (IS_HASWELL(dev) || IS_BROADWELL(dev)) - pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc); + pipe_wm->linetime = hsw_compute_linetime_wm(dev, + &intel_crtc->base); /* LP0 watermarks always use 1/2 DDB partitioning */ ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max); /* At least LP0 must be valid */ if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) - return false; + return -EINVAL; ilk_compute_wm_reg_maximums(dev, 1, &max); for (level = 1; level <= max_level; level++) { struct intel_wm_level wm = {}; - ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate, &wm); + ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate, + pristate, sprstate, curstate, &wm); /* * Disable any watermark level that exceeds the @@ -2363,7 +2334,7 @@ static bool intel_compute_pipe_wm(struct intel_crtc_state *cstate, pipe_wm->wm[level] = wm; } - return true; + return 0; } /* @@ -2378,7 +2349,9 @@ static void ilk_merge_wm_level(struct drm_device *dev, ret_wm->enable = true; for_each_intel_crtc(dev, intel_crtc) { - const struct intel_pipe_wm *active = &intel_crtc->wm.active; + const struct intel_crtc_state *cstate = + to_intel_crtc_state(intel_crtc->base.state); + const struct intel_pipe_wm *active = &cstate->wm.optimal.ilk; const struct intel_wm_level *wm = &active->wm[level]; if (!active->pipe_enabled) @@ -2526,14 +2499,15 @@ static void ilk_compute_wm_results(struct drm_device *dev, /* LP0 register values */ for_each_intel_crtc(dev, intel_crtc) { + const struct intel_crtc_state *cstate = + to_intel_crtc_state(intel_crtc->base.state); enum pipe pipe = intel_crtc->pipe; - const struct intel_wm_level *r = - &intel_crtc->wm.active.wm[0]; + const struct intel_wm_level *r = &cstate->wm.optimal.ilk.wm[0]; if (WARN_ON(!r->enable)) continue; - results->wm_linetime[pipe] = intel_crtc->wm.active.linetime; + results->wm_linetime[pipe] = cstate->wm.optimal.ilk.linetime; results->wm_pipe[pipe] = (r->pri_val << WM0_PIPE_PLANE_SHIFT) | @@ -2755,18 +2729,40 @@ static bool ilk_disable_lp_wm(struct drm_device *dev) #define SKL_DDB_SIZE 896 /* in blocks */ #define BXT_DDB_SIZE 512 +/* + * Return the index of a plane in the SKL DDB and wm result arrays. 
Primary + * plane is always in slot 0, cursor is always in slot I915_MAX_PLANES-1, and + * other universal planes are in indices 1..n. Note that this may leave unused + * indices between the top "sprite" plane and the cursor. + */ +static int +skl_wm_plane_id(const struct intel_plane *plane) +{ + switch (plane->base.type) { + case DRM_PLANE_TYPE_PRIMARY: + return 0; + case DRM_PLANE_TYPE_CURSOR: + return PLANE_CURSOR; + case DRM_PLANE_TYPE_OVERLAY: + return plane->plane + 1; + default: + MISSING_CASE(plane->base.type); + return plane->plane; + } +} + static void skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, - struct drm_crtc *for_crtc, + const struct intel_crtc_state *cstate, const struct intel_wm_config *config, - const struct skl_pipe_wm_parameters *params, struct skl_ddb_entry *alloc /* out */) { + struct drm_crtc *for_crtc = cstate->base.crtc; struct drm_crtc *crtc; unsigned int pipe_size, ddb_size; int nth_active_pipe; - if (!params->active) { + if (!cstate->base.active) { alloc->start = 0; alloc->end = 0; return; @@ -2837,19 +2833,29 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, } static unsigned int -skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p, int y) +skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, + const struct drm_plane_state *pstate, + int y) { + struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); + struct drm_framebuffer *fb = pstate->fb; /* for planar format */ - if (p->y_bytes_per_pixel) { + if (fb->pixel_format == DRM_FORMAT_NV12) { if (y) /* y-plane data rate */ - return p->horiz_pixels * p->vert_pixels * p->y_bytes_per_pixel; + return intel_crtc->config->pipe_src_w * + intel_crtc->config->pipe_src_h * + drm_format_plane_cpp(fb->pixel_format, 0); else /* uv-plane data rate */ - return (p->horiz_pixels/2) * (p->vert_pixels/2) * p->bytes_per_pixel; + return (intel_crtc->config->pipe_src_w/2) * + (intel_crtc->config->pipe_src_h/2) * + drm_format_plane_cpp(fb->pixel_format, 1); } /* for packed formats */ - return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel; + return intel_crtc->config->pipe_src_w * + intel_crtc->config->pipe_src_h * + drm_format_plane_cpp(fb->pixel_format, 0); } /* @@ -2858,46 +2864,55 @@ skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p, int y) * 3 * 4096 * 8192 * 4 < 2^32 */ static unsigned int -skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc, - const struct skl_pipe_wm_parameters *params) +skl_get_total_relative_data_rate(const struct intel_crtc_state *cstate) { + struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); + struct drm_device *dev = intel_crtc->base.dev; + const struct intel_plane *intel_plane; unsigned int total_data_rate = 0; - int plane; - for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) { - const struct intel_plane_wm_parameters *p; + for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { + const struct drm_plane_state *pstate = intel_plane->base.state; - p = ¶ms->plane[plane]; - if (!p->enabled) + if (pstate->fb == NULL) continue; - total_data_rate += skl_plane_relative_data_rate(p, 0); /* packed/uv */ - if (p->y_bytes_per_pixel) { - total_data_rate += skl_plane_relative_data_rate(p, 1); /* y-plane */ - } + if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR) + continue; + + /* packed/uv */ + total_data_rate += skl_plane_relative_data_rate(cstate, + pstate, + 0); + + if (pstate->fb->pixel_format == DRM_FORMAT_NV12) + /* y-plane */ + total_data_rate += 
skl_plane_relative_data_rate(cstate, + pstate, + 1); } return total_data_rate; } static void -skl_allocate_pipe_ddb(struct drm_crtc *crtc, - const struct intel_wm_config *config, - const struct skl_pipe_wm_parameters *params, +skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, struct skl_ddb_allocation *ddb /* out */) { + struct drm_crtc *crtc = cstate->base.crtc; struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_wm_config *config = &dev_priv->wm.config; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_plane *intel_plane; enum pipe pipe = intel_crtc->pipe; struct skl_ddb_entry *alloc = &ddb->pipe[pipe]; uint16_t alloc_size, start, cursor_blocks; uint16_t minimum[I915_MAX_PLANES]; uint16_t y_minimum[I915_MAX_PLANES]; unsigned int total_data_rate; - int plane; - skl_ddb_get_pipe_allocation_limits(dev, crtc, config, params, alloc); + skl_ddb_get_pipe_allocation_limits(dev, cstate, config, alloc); alloc_size = skl_ddb_entry_size(alloc); if (alloc_size == 0) { memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe])); @@ -2914,17 +2929,20 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc, alloc->end -= cursor_blocks; /* 1. Allocate the minimum required blocks for each active plane */ - for_each_plane(dev_priv, pipe, plane) { - const struct intel_plane_wm_parameters *p; + for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { + struct drm_plane *plane = &intel_plane->base; + struct drm_framebuffer *fb = plane->state->fb; + int id = skl_wm_plane_id(intel_plane); - p = &params->plane[plane]; - if (!p->enabled) + if (fb == NULL) + continue; + if (plane->type == DRM_PLANE_TYPE_CURSOR) continue; - minimum[plane] = 8; - alloc_size -= minimum[plane]; - y_minimum[plane] = p->y_bytes_per_pixel ? 8 : 0; - alloc_size -= y_minimum[plane]; + minimum[id] = 8; + alloc_size -= minimum[id]; + y_minimum[id] = (fb->pixel_format == DRM_FORMAT_NV12) ? 8 : 0; + alloc_size -= y_minimum[id]; } /* @@ -2933,45 +2951,50 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc, * * FIXME: we may not allocate every single block here.
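The loop that follows divides the remaining DDB space among the planes in proportion to their data rates, on top of the 8-block minimum reserved above. A worked instance with invented numbers, mirroring the div_u64() arithmetic in the hunk below (example_plane_blocks is not part of the patch):

static uint16_t example_plane_blocks(uint16_t minimum, uint16_t alloc_size,
				     unsigned int data_rate,
				     unsigned int total_data_rate)
{
	/* With alloc_size = 160 spare blocks and two planes reporting
	 * data rates of 3000 and 1000:
	 *   plane 0: 8 + 160 * 3000 / 4000 = 128 blocks
	 *   plane 1: 8 + 160 * 1000 / 4000 =  48 blocks
	 * The cast to 64 bits keeps the product from overflowing. */
	return minimum + div_u64((uint64_t)alloc_size * data_rate,
				 total_data_rate);
}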
*/ - total_data_rate = skl_get_total_relative_data_rate(intel_crtc, params); + total_data_rate = skl_get_total_relative_data_rate(cstate); start = alloc->start; - for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) { - const struct intel_plane_wm_parameters *p; + for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { + struct drm_plane *plane = &intel_plane->base; + struct drm_plane_state *pstate = intel_plane->base.state; unsigned int data_rate, y_data_rate; uint16_t plane_blocks, y_plane_blocks = 0; + int id = skl_wm_plane_id(intel_plane); - p = &params->plane[plane]; - if (!p->enabled) + if (pstate->fb == NULL) + continue; + if (plane->type == DRM_PLANE_TYPE_CURSOR) continue; - data_rate = skl_plane_relative_data_rate(p, 0); + data_rate = skl_plane_relative_data_rate(cstate, pstate, 0); /* * allocation for (packed formats) or (uv-plane part of planar format): * promote the expression to 64 bits to avoid overflowing, the * result is < available as data_rate / total_data_rate < 1 */ - plane_blocks = minimum[plane]; + plane_blocks = minimum[id]; plane_blocks += div_u64((uint64_t)alloc_size * data_rate, total_data_rate); - ddb->plane[pipe][plane].start = start; - ddb->plane[pipe][plane].end = start + plane_blocks; + ddb->plane[pipe][id].start = start; + ddb->plane[pipe][id].end = start + plane_blocks; start += plane_blocks; /* * allocation for y_plane part of planar format: */ - if (p->y_bytes_per_pixel) { - y_data_rate = skl_plane_relative_data_rate(p, 1); - y_plane_blocks = y_minimum[plane]; + if (pstate->fb->pixel_format == DRM_FORMAT_NV12) { + y_data_rate = skl_plane_relative_data_rate(cstate, + pstate, + 1); + y_plane_blocks = y_minimum[id]; y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate, total_data_rate); - ddb->y_plane[pipe][plane].start = start; - ddb->y_plane[pipe][plane].end = start + y_plane_blocks; + ddb->y_plane[pipe][id].start = start; + ddb->y_plane[pipe][id].end = start + y_plane_blocks; start += y_plane_blocks; } @@ -3041,104 +3064,27 @@ static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb, struct drm_device *dev = intel_crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb; - enum pipe pipe = intel_crtc->pipe; - - if (memcmp(new_ddb->plane[pipe], cur_ddb->plane[pipe], - sizeof(new_ddb->plane[pipe]))) - return true; - if (memcmp(&new_ddb->plane[pipe][PLANE_CURSOR], &cur_ddb->plane[pipe][PLANE_CURSOR], - sizeof(new_ddb->plane[pipe][PLANE_CURSOR]))) + /* + * If ddb allocation of pipes changed, it may require recalculation of + * watermarks */ + if (memcmp(new_ddb->pipe, cur_ddb->pipe, sizeof(new_ddb->pipe))) return true; return false; } -static void skl_compute_wm_global_parameters(struct drm_device *dev, - struct intel_wm_config *config) -{ - struct drm_crtc *crtc; - struct drm_plane *plane; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) - config->num_pipes_active += to_intel_crtc(crtc)->active; - - /* FIXME: I don't think we need those two global parameters on SKL */ - list_for_each_entry(plane, &dev->mode_config.plane_list, head) { - struct intel_plane *intel_plane = to_intel_plane(plane); - - config->sprites_enabled |= intel_plane->wm.enabled; - config->sprites_scaled |= intel_plane->wm.scaled; - } -} - -static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc, - struct skl_pipe_wm_parameters *p) -{ - struct drm_device *dev = crtc->dev; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum pipe pipe =
intel_crtc->pipe; - struct drm_plane *plane; - struct drm_framebuffer *fb; - int i = 1; /* Index for sprite planes start */ - - p->active = intel_crtc->active; - if (p->active) { - p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal; - p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config); - - fb = crtc->primary->state->fb; - /* For planar: Bpp is for uv plane, y_Bpp is for y plane */ - if (fb) { - p->plane[0].enabled = true; - p->plane[0].bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ? - drm_format_plane_cpp(fb->pixel_format, 1) : - drm_format_plane_cpp(fb->pixel_format, 0); - p->plane[0].y_bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ? - drm_format_plane_cpp(fb->pixel_format, 0) : 0; - p->plane[0].tiling = fb->modifier[0]; - } else { - p->plane[0].enabled = false; - p->plane[0].bytes_per_pixel = 0; - p->plane[0].y_bytes_per_pixel = 0; - p->plane[0].tiling = DRM_FORMAT_MOD_NONE; - } - p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w; - p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h; - p->plane[0].rotation = crtc->primary->state->rotation; - - fb = crtc->cursor->state->fb; - p->plane[PLANE_CURSOR].y_bytes_per_pixel = 0; - if (fb) { - p->plane[PLANE_CURSOR].enabled = true; - p->plane[PLANE_CURSOR].bytes_per_pixel = fb->bits_per_pixel / 8; - p->plane[PLANE_CURSOR].horiz_pixels = crtc->cursor->state->crtc_w; - p->plane[PLANE_CURSOR].vert_pixels = crtc->cursor->state->crtc_h; - } else { - p->plane[PLANE_CURSOR].enabled = false; - p->plane[PLANE_CURSOR].bytes_per_pixel = 0; - p->plane[PLANE_CURSOR].horiz_pixels = 64; - p->plane[PLANE_CURSOR].vert_pixels = 64; - } - } - - list_for_each_entry(plane, &dev->mode_config.plane_list, head) { - struct intel_plane *intel_plane = to_intel_plane(plane); - - if (intel_plane->pipe == pipe && - plane->type == DRM_PLANE_TYPE_OVERLAY) - p->plane[i++] = intel_plane->wm; - } -} - static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, - struct skl_pipe_wm_parameters *p, - struct intel_plane_wm_parameters *p_params, + struct intel_crtc_state *cstate, + struct intel_plane *intel_plane, uint16_t ddb_allocation, int level, uint16_t *out_blocks, /* out */ uint8_t *out_lines /* out */) { + struct drm_plane *plane = &intel_plane->base; + struct drm_framebuffer *fb = plane->state->fb; uint32_t latency = dev_priv->wm.skl_latency[level]; uint32_t method1, method2; uint32_t plane_bytes_per_line, plane_blocks_per_line; @@ -3146,31 +3092,33 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, uint32_t selected_result; uint8_t bytes_per_pixel; - if (latency == 0 || !p->active || !p_params->enabled) + if (latency == 0 || !cstate->base.active || !fb) return false; - bytes_per_pixel = p_params->y_bytes_per_pixel ? 
- p_params->y_bytes_per_pixel : - p_params->bytes_per_pixel; - method1 = skl_wm_method1(p->pixel_rate, + bytes_per_pixel = drm_format_plane_cpp(fb->pixel_format, 0); + method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate), bytes_per_pixel, latency); - method2 = skl_wm_method2(p->pixel_rate, - p->pipe_htotal, - p_params->horiz_pixels, + method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate), + cstate->base.adjusted_mode.crtc_htotal, + cstate->pipe_src_w, bytes_per_pixel, - p_params->tiling, + fb->modifier[0], latency); - plane_bytes_per_line = p_params->horiz_pixels * bytes_per_pixel; + plane_bytes_per_line = cstate->pipe_src_w * bytes_per_pixel; plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); - if (p_params->tiling == I915_FORMAT_MOD_Y_TILED || - p_params->tiling == I915_FORMAT_MOD_Yf_TILED) { + if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED || + fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) { uint32_t min_scanlines = 4; uint32_t y_tile_minimum; - if (intel_rotation_90_or_270(p_params->rotation)) { - switch (p_params->bytes_per_pixel) { + if (intel_rotation_90_or_270(plane->state->rotation)) { + int bpp = (fb->pixel_format == DRM_FORMAT_NV12) ? + drm_format_plane_cpp(fb->pixel_format, 1) : + drm_format_plane_cpp(fb->pixel_format, 0); + + switch (bpp) { case 1: min_scanlines = 16; break; @@ -3194,8 +3142,8 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line); if (level >= 1 && level <= 7) { - if (p_params->tiling == I915_FORMAT_MOD_Y_TILED || - p_params->tiling == I915_FORMAT_MOD_Yf_TILED) + if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED || + fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) res_lines += 4; else res_blocks++; @@ -3212,84 +3160,80 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, static void skl_compute_wm_level(const struct drm_i915_private *dev_priv, struct skl_ddb_allocation *ddb, - struct skl_pipe_wm_parameters *p, - enum pipe pipe, + struct intel_crtc_state *cstate, int level, - int num_planes, struct skl_wm_level *result) { + struct drm_device *dev = dev_priv->dev; + struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); + struct intel_plane *intel_plane; uint16_t ddb_blocks; - int i; + enum pipe pipe = intel_crtc->pipe; + + for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { + int i = skl_wm_plane_id(intel_plane); - for (i = 0; i < num_planes; i++) { ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]); result->plane_en[i] = skl_compute_plane_wm(dev_priv, - p, &p->plane[i], + cstate, + intel_plane, ddb_blocks, level, &result->plane_res_b[i], &result->plane_res_l[i]); } - - ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][PLANE_CURSOR]); - result->plane_en[PLANE_CURSOR] = skl_compute_plane_wm(dev_priv, p, - &p->plane[PLANE_CURSOR], - ddb_blocks, level, - &result->plane_res_b[PLANE_CURSOR], - &result->plane_res_l[PLANE_CURSOR]); } static uint32_t -skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p) +skl_compute_linetime_wm(struct intel_crtc_state *cstate) { - if (!to_intel_crtc(crtc)->active) + if (!cstate->base.active) return 0; - if (WARN_ON(p->pixel_rate == 0)) + if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0)) return 0; - return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate); + return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000, + skl_pipe_pixel_rate(cstate)); } -static void skl_compute_transition_wm(struct drm_crtc *crtc, - struct skl_pipe_wm_parameters *params, 
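The IS_SKL_REVID()/IS_BXT_REVID() checks used throughout this merge (in intel_lrc.c above and intel_pm.c below) replace open-coded IS_SKYLAKE()/IS_BROXTON() plus INTEL_REVID() comparisons with an inclusive range test. Assuming the i915_drv.h helpers follow this shape (a sketch, not the verbatim header):

#define IS_REVID(p, since, until) \
	(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))

#define IS_SKL_REVID(p, since, until) \
	(IS_SKYLAKE(p) && IS_REVID(p, since, until))

#define IS_BXT_REVID(p, since, until) \
	(IS_BROXTON(p) && IS_REVID(p, since, until))

On this reading, IS_BXT_REVID(dev, 0, BXT_REVID_A1) covers exactly the A0 and A1 steppings that the old INTEL_REVID(dev) < BXT_REVID_B0 test matched.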
+static void skl_compute_transition_wm(struct intel_crtc_state *cstate, struct skl_wm_level *trans_wm /* out */) { + struct drm_crtc *crtc = cstate->base.crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int i; + struct intel_plane *intel_plane; - if (!params->active) + if (!cstate->base.active) return; /* Until we know more, just disable transition WMs */ - for (i = 0; i < intel_num_planes(intel_crtc); i++) + for_each_intel_plane_on_crtc(crtc->dev, intel_crtc, intel_plane) { + int i = skl_wm_plane_id(intel_plane); + trans_wm->plane_en[i] = false; - trans_wm->plane_en[PLANE_CURSOR] = false; + } } -static void skl_compute_pipe_wm(struct drm_crtc *crtc, +static void skl_compute_pipe_wm(struct intel_crtc_state *cstate, struct skl_ddb_allocation *ddb, - struct skl_pipe_wm_parameters *params, struct skl_pipe_wm *pipe_wm) { - struct drm_device *dev = crtc->dev; + struct drm_device *dev = cstate->base.crtc->dev; const struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int level, max_level = ilk_wm_max_level(dev); for (level = 0; level <= max_level; level++) { - skl_compute_wm_level(dev_priv, ddb, params, intel_crtc->pipe, - level, intel_num_planes(intel_crtc), - &pipe_wm->wm[level]); + skl_compute_wm_level(dev_priv, ddb, cstate, + level, &pipe_wm->wm[level]); } - pipe_wm->linetime = skl_compute_linetime_wm(crtc, params); + pipe_wm->linetime = skl_compute_linetime_wm(cstate); - skl_compute_transition_wm(crtc, params, &pipe_wm->trans_wm); + skl_compute_transition_wm(cstate, &pipe_wm->trans_wm); } static void skl_compute_wm_results(struct drm_device *dev, - struct skl_pipe_wm_parameters *p, struct skl_pipe_wm *p_wm, struct skl_wm_values *r, struct intel_crtc *intel_crtc) @@ -3346,7 +3290,8 @@ static void skl_compute_wm_results(struct drm_device *dev, r->wm_linetime[pipe] = p_wm->linetime; } -static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, uint32_t reg, +static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, + i915_reg_t reg, const struct skl_ddb_entry *entry) { if (entry->end) @@ -3533,28 +3478,25 @@ static void skl_flush_wm_values(struct drm_i915_private *dev_priv, } static bool skl_update_pipe_wm(struct drm_crtc *crtc, - struct skl_pipe_wm_parameters *params, - struct intel_wm_config *config, struct skl_ddb_allocation *ddb, /* out */ struct skl_pipe_wm *pipe_wm /* out */) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); - skl_compute_wm_pipe_parameters(crtc, params); - skl_allocate_pipe_ddb(crtc, config, params, ddb); - skl_compute_pipe_wm(crtc, ddb, params, pipe_wm); + skl_allocate_pipe_ddb(cstate, ddb); + skl_compute_pipe_wm(cstate, ddb, pipe_wm); - if (!memcmp(&intel_crtc->wm.skl_active, pipe_wm, sizeof(*pipe_wm))) + if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm))) return false; - intel_crtc->wm.skl_active = *pipe_wm; + intel_crtc->wm.active.skl = *pipe_wm; return true; } static void skl_update_other_pipe_wm(struct drm_device *dev, struct drm_crtc *crtc, - struct intel_wm_config *config, struct skl_wm_values *r) { struct intel_crtc *intel_crtc; @@ -3575,7 +3517,6 @@ static void skl_update_other_pipe_wm(struct drm_device *dev, */ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) { - struct skl_pipe_wm_parameters params = {}; struct skl_pipe_wm pipe_wm = {}; bool wm_changed; @@ -3586,7 +3527,6 @@ static void skl_update_other_pipe_wm(struct drm_device *dev, continue; wm_changed = 
skl_update_pipe_wm(&intel_crtc->base, - &params, config, &r->ddb, &pipe_wm); /* @@ -3596,7 +3536,7 @@ static void skl_update_other_pipe_wm(struct drm_device *dev, */ WARN_ON(!wm_changed); - skl_compute_wm_results(dev, &params, &pipe_wm, r, intel_crtc); + skl_compute_wm_results(dev, &pipe_wm, r, intel_crtc); r->dirty[intel_crtc->pipe] = true; } } @@ -3626,10 +3566,9 @@ static void skl_update_wm(struct drm_crtc *crtc) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct skl_pipe_wm_parameters params = {}; struct skl_wm_values *results = &dev_priv->wm.skl_results; - struct skl_pipe_wm pipe_wm = {}; - struct intel_wm_config config = {}; + struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); + struct skl_pipe_wm *pipe_wm = &cstate->wm.optimal.skl; /* Clear all dirty flags */ @@ -3637,16 +3576,13 @@ static void skl_update_wm(struct drm_crtc *crtc) skl_clear_wm(results, intel_crtc->pipe); - skl_compute_wm_global_parameters(dev, &config); - - if (!skl_update_pipe_wm(crtc, &params, &config, - &results->ddb, &pipe_wm)) + if (!skl_update_pipe_wm(crtc, &results->ddb, pipe_wm)) return; - skl_compute_wm_results(dev, &params, &pipe_wm, results, intel_crtc); + skl_compute_wm_results(dev, pipe_wm, results, intel_crtc); results->dirty[intel_crtc->pipe] = true; - skl_update_other_pipe_wm(dev, crtc, &config, results); + skl_update_other_pipe_wm(dev, crtc, results); skl_write_wm_values(dev_priv, results); skl_flush_wm_values(dev_priv, results); @@ -3654,71 +3590,23 @@ static void skl_update_wm(struct drm_crtc *crtc) dev_priv->wm.skl_hw = *results; } -static void -skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc, - uint32_t sprite_width, uint32_t sprite_height, - int pixel_size, bool enabled, bool scaled) -{ - struct intel_plane *intel_plane = to_intel_plane(plane); - struct drm_framebuffer *fb = plane->state->fb; - - intel_plane->wm.enabled = enabled; - intel_plane->wm.scaled = scaled; - intel_plane->wm.horiz_pixels = sprite_width; - intel_plane->wm.vert_pixels = sprite_height; - intel_plane->wm.tiling = DRM_FORMAT_MOD_NONE; - - /* For planar: Bpp is for UV plane, y_Bpp is for Y plane */ - intel_plane->wm.bytes_per_pixel = - (fb && fb->pixel_format == DRM_FORMAT_NV12) ? - drm_format_plane_cpp(plane->state->fb->pixel_format, 1) : pixel_size; - intel_plane->wm.y_bytes_per_pixel = - (fb && fb->pixel_format == DRM_FORMAT_NV12) ? - drm_format_plane_cpp(plane->state->fb->pixel_format, 0) : 0; - - /* - * Framebuffer can be NULL on plane disable, but it does not - * matter for watermarks if we assume no tiling in that case.
- */ - if (fb) - intel_plane->wm.tiling = fb->modifier[0]; - intel_plane->wm.rotation = plane->state->rotation; - - skl_update_wm(crtc); -} - -static void ilk_update_wm(struct drm_crtc *crtc) +static void ilk_program_watermarks(struct drm_i915_private *dev_priv) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_device *dev = dev_priv->dev; + struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; struct ilk_wm_maximums max; + struct intel_wm_config *config = &dev_priv->wm.config; struct ilk_wm_values results = {}; enum intel_ddb_partitioning partitioning; - struct intel_pipe_wm pipe_wm = {}; - struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; - struct intel_wm_config config = {}; - - WARN_ON(cstate->base.active != intel_crtc->active); - - intel_compute_pipe_wm(cstate, &pipe_wm); - if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm))) - return; - - intel_crtc->wm.active = pipe_wm; - - ilk_compute_wm_config(dev, &config); - - ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max); - ilk_wm_merge(dev, &config, &max, &lp_wm_1_2); + ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_1_2, &max); + ilk_wm_merge(dev, config, &max, &lp_wm_1_2); /* 5/6 split only in single pipe config on IVB+ */ if (INTEL_INFO(dev)->gen >= 7 && - config.num_pipes_active == 1 && config.sprites_enabled) { - ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max); - ilk_wm_merge(dev, &config, &max, &lp_wm_5_6); + config->num_pipes_active == 1 && config->sprites_enabled) { + ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_5_6, &max); + ilk_wm_merge(dev, config, &max, &lp_wm_5_6); best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6); } else { @@ -3733,14 +3621,13 @@ static void ilk_update_wm(struct drm_crtc *crtc) ilk_write_wm_values(dev_priv, &results); } -static void -ilk_update_sprite_wm(struct drm_plane *plane, - struct drm_crtc *crtc, - uint32_t sprite_width, uint32_t sprite_height, - int pixel_size, bool enabled, bool scaled) +static void ilk_update_wm(struct drm_crtc *crtc) { - struct drm_device *dev = plane->dev; - struct intel_plane *intel_plane = to_intel_plane(plane); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); + + WARN_ON(cstate->base.active != intel_crtc->active); /* * IVB workaround: must disable low power watermarks for at least @@ -3749,10 +3636,14 @@ ilk_update_sprite_wm(struct drm_plane *plane, * * WaCxSRDisabledForSpriteScaling:ivb */ - if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev)) - intel_wait_for_vblank(dev, intel_plane->pipe); + if (cstate->disable_lp_wm) { + ilk_disable_lp_wm(crtc->dev); + intel_wait_for_vblank(crtc->dev, intel_crtc->pipe); + } + + intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk; - ilk_update_wm(crtc); + ilk_program_watermarks(dev_priv); } static void skl_pipe_wm_active_state(uint32_t val, @@ -3805,7 +3696,8 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc) struct drm_i915_private *dev_priv = dev->dev_private; struct skl_wm_values *hw = &dev_priv->wm.skl_hw; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct skl_pipe_wm *active = &intel_crtc->wm.skl_active; + struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); + struct skl_pipe_wm 
*active = &cstate->wm.optimal.skl; enum pipe pipe = intel_crtc->pipe; int level, i, max_level; uint32_t temp; @@ -3849,6 +3741,8 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc) temp = hw->plane_trans[pipe][PLANE_CURSOR]; skl_pipe_wm_active_state(temp, active, true, true, i, 0); + + intel_crtc->wm.active.skl = *active; } void skl_wm_get_hw_state(struct drm_device *dev) @@ -3868,9 +3762,10 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) struct drm_i915_private *dev_priv = dev->dev_private; struct ilk_wm_values *hw = &dev_priv->wm.hw; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_pipe_wm *active = &intel_crtc->wm.active; + struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); + struct intel_pipe_wm *active = &cstate->wm.optimal.ilk; enum pipe pipe = intel_crtc->pipe; - static const unsigned int wm0_pipe_reg[] = { + static const i915_reg_t wm0_pipe_reg[] = { [PIPE_A] = WM0_PIPEA_ILK, [PIPE_B] = WM0_PIPEB_ILK, [PIPE_C] = WM0_PIPEC_IVB, @@ -3907,6 +3802,8 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) for (level = 0; level <= max_level; level++) active->wm[level].enable = true; } + + intel_crtc->wm.active.ilk = *active; } #define _FW_WM(value, plane) \ @@ -4132,21 +4029,6 @@ void intel_update_watermarks(struct drm_crtc *crtc) dev_priv->display.update_wm(crtc); } -void intel_update_sprite_watermarks(struct drm_plane *plane, - struct drm_crtc *crtc, - uint32_t sprite_width, - uint32_t sprite_height, - int pixel_size, - bool enabled, bool scaled) -{ - struct drm_i915_private *dev_priv = plane->dev->dev_private; - - if (dev_priv->display.update_sprite_wm) - dev_priv->display.update_sprite_wm(plane, crtc, - sprite_width, sprite_height, - pixel_size, enabled, scaled); -} - /** * Lock protecting IPS related data structures */ @@ -4414,7 +4296,7 @@ static void gen6_set_rps(struct drm_device *dev, u8 val) struct drm_i915_private *dev_priv = dev->dev_private; /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */ - if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) + if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) return; WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); @@ -4689,7 +4571,8 @@ static void gen6_init_rps_frequencies(struct drm_device *dev) dev_priv->rps.max_freq = dev_priv->rps.rp0_freq; dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq; - if (IS_HASWELL(dev) || IS_BROADWELL(dev) || IS_SKYLAKE(dev)) { + if (IS_HASWELL(dev) || IS_BROADWELL(dev) || + IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { ret = sandybridge_pcode_read(dev_priv, HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, &ddcc_status); @@ -4701,7 +4584,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev) dev_priv->rps.max_freq); } - if (IS_SKYLAKE(dev)) { + if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { /* Store the frequency values in 16.66 MHZ units, which is the natural hardware unit for SKL */ dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER; @@ -4738,7 +4621,7 @@ static void gen9_enable_rps(struct drm_device *dev) gen6_init_rps_frequencies(dev); /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */ - if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) { + if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); return; } @@ -4783,7 +4666,7 @@ static void gen9_enable_rc6(struct drm_device *dev) /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */ if (IS_SKYLAKE(dev) && !((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && - (INTEL_REVID(dev) <= SKL_REVID_E0))) + IS_SKL_REVID(dev, 
0, SKL_REVID_E0))) I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16); else I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); @@ -4807,8 +4690,8 @@ static void gen9_enable_rc6(struct drm_device *dev) DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off"); /* WaRsUseTimeoutMode */ - if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_D0) || - (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_A0)) { + if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || + IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */ I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | GEN7_RC_CTL_TO_MODE | @@ -4824,8 +4707,9 @@ static void gen9_enable_rc6(struct drm_device *dev) * 3b: Enable Coarse Power Gating only when RC6 is enabled. * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6. */ - if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) || - ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_E0))) + if (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || + ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && + IS_SKL_REVID(dev, 0, SKL_REVID_E0))) I915_WRITE(GEN9_PG_ENABLE, 0); else I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? @@ -5056,7 +4940,7 @@ static void __gen6_update_ring_freq(struct drm_device *dev) /* convert DDR frequency from units of 266.6MHz to bandwidth */ min_ring_freq = mult_frac(min_ring_freq, 8, 3); - if (IS_SKYLAKE(dev)) { + if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { /* Convert GT frequency to 50 HZ units */ min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER; max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER; @@ -5074,7 +4958,7 @@ static void __gen6_update_ring_freq(struct drm_device *dev) int diff = max_gpu_freq - gpu_freq; unsigned int ia_freq = 0, ring_freq = 0; - if (IS_SKYLAKE(dev)) { + if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { /* * ring_freq = 2 * GT. ring_freq is in 100MHz units * No floor required for ring frequency on SKL. @@ -6202,7 +6086,7 @@ static void intel_gen6_powersave_work(struct work_struct *work) } else if (INTEL_INFO(dev)->gen >= 9) { gen9_enable_rc6(dev); gen9_enable_rps(dev); - if (IS_SKYLAKE(dev)) + if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) __gen6_update_ring_freq(dev); } else if (IS_BROADWELL(dev)) { gen8_enable_rps(dev); @@ -7058,7 +6942,6 @@ void intel_init_pm(struct drm_device *dev) dev_priv->display.init_clock_gating = bxt_init_clock_gating; dev_priv->display.update_wm = skl_update_wm; - dev_priv->display.update_sprite_wm = skl_update_sprite_wm; } else if (HAS_PCH_SPLIT(dev)) { ilk_setup_wm_latency(dev); @@ -7067,7 +6950,7 @@ void intel_init_pm(struct drm_device *dev) (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] && dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) { dev_priv->display.update_wm = ilk_update_wm; - dev_priv->display.update_sprite_wm = ilk_update_sprite_wm; + dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm; } else { DRM_DEBUG_KMS("Failed to read display plane latency. 
" "Disable CxSR\n"); diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 213581c215b3..bc5ea2a6cf4c 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -80,7 +80,7 @@ static void intel_psr_write_vsc(struct intel_dp *intel_dp, struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; - u32 ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); + i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); uint32_t *data = (uint32_t *) vsc_psr; unsigned int i; @@ -151,13 +151,31 @@ static void vlv_psr_enable_sink(struct intel_dp *intel_dp) DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE); } +static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv, + enum port port) +{ + if (INTEL_INFO(dev_priv)->gen >= 9) + return DP_AUX_CH_CTL(port); + else + return EDP_PSR_AUX_CTL; +} + +static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv, + enum port port, int index) +{ + if (INTEL_INFO(dev_priv)->gen >= 9) + return DP_AUX_CH_DATA(port, index); + else + return EDP_PSR_AUX_DATA(index); +} + static void hsw_psr_enable_sink(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t aux_clock_divider; - uint32_t aux_data_reg, aux_ctl_reg; + i915_reg_t aux_ctl_reg; int precharge = 0x3; static const uint8_t aux_msg[] = { [0] = DP_AUX_NATIVE_WRITE << 4, @@ -166,6 +184,7 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp) [3] = 1 - 1, [4] = DP_SET_POWER_D0, }; + enum port port = dig_port->port; int i; BUILD_BUG_ON(sizeof(aux_msg) > 20); @@ -181,14 +200,11 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp) DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF, DP_AUX_FRAME_SYNC_ENABLE); - aux_data_reg = (INTEL_INFO(dev)->gen >= 9) ? - DPA_AUX_CH_DATA1 : EDP_PSR_AUX_DATA1(dev); - aux_ctl_reg = (INTEL_INFO(dev)->gen >= 9) ? - DPA_AUX_CH_CTL : EDP_PSR_AUX_CTL(dev); + aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port); /* Setup AUX registers */ for (i = 0; i < sizeof(aux_msg); i += 4) - I915_WRITE(aux_data_reg + i, + I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2), intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i)); if (INTEL_INFO(dev)->gen >= 9) { @@ -267,16 +283,11 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp) const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) { - /* It doesn't mean we shouldn't send TPS patters, so let's - send the minimal TP1 possible and skip TP2. */ - val |= EDP_PSR_TP1_TIME_100us; - val |= EDP_PSR_TP2_TP3_TIME_0us; - val |= EDP_PSR_SKIP_AUX_EXIT; /* Sink should be able to train with the 5 or 6 idle patterns */ idle_frames += 4; } - I915_WRITE(EDP_PSR_CTL(dev), val | + I915_WRITE(EDP_PSR_CTL, val | (IS_BROADWELL(dev) ? 
0 : link_entry_time) |
		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
@@ -340,7 +351,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
 	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+	WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
 	WARN_ON(dev_priv->psr.active);
 	lockdep_assert_held(&dev_priv->psr.lock);
@@ -404,7 +415,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
 	}
 
 	/* Avoid continuous PSR exit by masking memup and hpd */
-	I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
+	I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
 		   EDP_PSR_DEBUG_MASK_HPD);
 
 	/* Enable PSR on the panel */
@@ -427,6 +438,19 @@ void intel_psr_enable(struct intel_dp *intel_dp)
 		vlv_psr_enable_source(intel_dp);
 	}
 
+	/*
+	 * FIXME: Activation should happen immediately since this function
+	 * is just called after pipe is fully trained and enabled.
+	 * However, on every platform we face issues when first activation
+	 * follows a modeset so quickly.
+	 * - On VLV/CHV we get a blank screen on first activation
+	 * - On HSW/BDW we get a recoverable frozen screen until next
+	 *   exit-activate sequence.
+	 */
+	if (INTEL_INFO(dev)->gen < 9)
+		schedule_delayed_work(&dev_priv->psr.work,
			msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
+
 	dev_priv->psr.enabled = intel_dp;
unlock:
 	mutex_unlock(&dev_priv->psr.lock);
@@ -466,17 +490,17 @@ static void hsw_psr_disable(struct intel_dp *intel_dp)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (dev_priv->psr.active) {
-		I915_WRITE(EDP_PSR_CTL(dev),
-			   I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
+		I915_WRITE(EDP_PSR_CTL,
+			   I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
 
 		/* Wait till PSR is idle */
-		if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
+		if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
 			       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
 			DRM_ERROR("Timed out waiting for PSR Idle State\n");
 
 		dev_priv->psr.active = false;
 	} else {
-		WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
 	}
 }
@@ -523,7 +547,7 @@ static void intel_psr_work(struct work_struct *work)
 	 * and be ready for re-enable.
 	 */
 	if (HAS_DDI(dev_priv->dev)) {
-		if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
+		if (wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
 			      EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
 			DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
 			return;
@@ -566,11 +590,11 @@ static void intel_psr_exit(struct drm_device *dev)
 		return;
 
 	if (HAS_DDI(dev)) {
-		val = I915_READ(EDP_PSR_CTL(dev));
+		val = I915_READ(EDP_PSR_CTL);
 		WARN_ON(!(val & EDP_PSR_ENABLE));
-		I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
+		I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
 	} else {
 		val = I915_READ(VLV_PSRCTL(pipe));
@@ -700,7 +724,6 @@ void intel_psr_flush(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
 	enum pipe pipe;
-	int delay_ms = HAS_DDI(dev) ? 100 : 500;
100 : 500; mutex_lock(&dev_priv->psr.lock); if (!dev_priv->psr.enabled) { @@ -735,8 +758,9 @@ void intel_psr_flush(struct drm_device *dev, } if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) - schedule_delayed_work(&dev_priv->psr.work, - msecs_to_jiffies(delay_ms)); + if (!work_busy(&dev_priv->psr.work.work)) + schedule_delayed_work(&dev_priv->psr.work, + msecs_to_jiffies(100)); mutex_unlock(&dev_priv->psr.lock); } @@ -751,6 +775,9 @@ void intel_psr_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ? + HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE; + INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work); mutex_init(&dev_priv->psr.lock); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 9461a238f5d5..57d78f264b53 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -481,7 +481,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *ring) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = ring->dev->dev_private; - u32 mmio = 0; + i915_reg_t mmio; /* The ring status page addresses are no longer next to the rest of * the ring registers as of gen7. @@ -524,7 +524,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *ring) * invalidating the TLB? */ if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) { - u32 reg = RING_INSTPM(ring->mmio_base); + i915_reg_t reg = RING_INSTPM(ring->mmio_base); /* ring should be idle before issuing a sync flush*/ WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0); @@ -733,7 +733,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count)); for (i = 0; i < w->count; i++) { - intel_ring_emit(ring, w->reg[i].addr); + intel_ring_emit_reg(ring, w->reg[i].addr); intel_ring_emit(ring, w->reg[i].value); } intel_ring_emit(ring, MI_NOOP); @@ -766,7 +766,8 @@ static int intel_rcs_ctx_init(struct drm_i915_gem_request *req) } static int wa_add(struct drm_i915_private *dev_priv, - const u32 addr, const u32 mask, const u32 val) + i915_reg_t addr, + const u32 mask, const u32 val) { const u32 idx = dev_priv->workarounds.count; @@ -924,17 +925,15 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring) WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC); - if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) == SKL_REVID_A0 || - INTEL_REVID(dev) == SKL_REVID_B0)) || - (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) { - /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */ + /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */ + if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || + IS_BXT_REVID(dev, 0, BXT_REVID_A1)) WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, GEN9_DG_MIRROR_FIX_ENABLE); - } - if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) || - (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) { - /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ + /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ + if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || + IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1, GEN9_RHWO_OPTIMIZATION_DISABLE); /* @@ -944,12 +943,10 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring) */ } - if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) >= SKL_REVID_C0) || - IS_BROXTON(dev)) { - /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */ + /* 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9461a238f5d5..57d78f264b53 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -481,7 +481,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	u32 mmio = 0;
+	i915_reg_t mmio;
 
 	/* The ring status page addresses are no longer next to the rest of
 	 * the ring registers as of gen7.
@@ -524,7 +524,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
 	 * invalidating the TLB?
 	 */
 	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
-		u32 reg = RING_INSTPM(ring->mmio_base);
+		i915_reg_t reg = RING_INSTPM(ring->mmio_base);
 
 		/* ring should be idle before issuing a sync flush*/
 		WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
@@ -733,7 +733,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
 	for (i = 0; i < w->count; i++) {
-		intel_ring_emit(ring, w->reg[i].addr);
+		intel_ring_emit_reg(ring, w->reg[i].addr);
 		intel_ring_emit(ring, w->reg[i].value);
 	}
 	intel_ring_emit(ring, MI_NOOP);
@@ -766,7 +766,8 @@ static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
 }
 
 static int wa_add(struct drm_i915_private *dev_priv,
-		  const u32 addr, const u32 mask, const u32 val)
+		  i915_reg_t addr,
+		  const u32 mask, const u32 val)
 {
 	const u32 idx = dev_priv->workarounds.count;
@@ -924,17 +925,15 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
 	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
 			  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
 
-	if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) == SKL_REVID_A0 ||
-	    INTEL_REVID(dev) == SKL_REVID_B0)) ||
-	    (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) {
-		/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
+	/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
 		WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
 				  GEN9_DG_MIRROR_FIX_ENABLE);
-	}
 
-	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
-	    (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) {
-		/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
+	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+	    IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
 		WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
 				  GEN9_RHWO_OPTIMIZATION_DISABLE);
 		/*
@@ -944,12 +943,10 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
 		 */
 	}
 
-	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) >= SKL_REVID_C0) ||
-	    IS_BROXTON(dev)) {
-		/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
+	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
+	if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER) || IS_BROXTON(dev))
 		WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
 				  GEN9_ENABLE_YV12_BUGFIX);
-	}
 
 	/* Wa4x4STCOptimizationDisable:skl,bxt */
 	/* WaDisablePartialResolveInVc:skl,bxt */
@@ -961,24 +958,22 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
 			  GEN9_CCS_TLB_PREFETCH_ENABLE);
 
 	/* WaDisableMaskBasedCammingInRCC:skl,bxt */
-	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_C0) ||
-	    (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0))
+	if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) ||
+	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
 		WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
 				  PIXEL_MASK_CAMMING_DISABLE);
 
 	/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
 	tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
-	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_F0) ||
-	    (IS_BROXTON(dev) && INTEL_REVID(dev) >= BXT_REVID_B0))
+	if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
+	    IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
 		tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
 	WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
 
 	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
-	if (IS_SKYLAKE(dev) ||
-	    (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_B0)) {
+	if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0))
 		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
 				  GEN8_SAMPLER_POWER_BYPASS_DIS);
-	}
 
 	/* WaDisableSTUnitPowerOptimization:skl,bxt */
 	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
@@ -1038,7 +1033,7 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
 	if (ret)
 		return ret;
 
-	if (INTEL_REVID(dev) <= SKL_REVID_D0) {
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
 		/* WaDisableHDCInvalidation:skl */
 		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
 			   BDW_DISABLE_HDC_INVALIDATION);
@@ -1051,23 +1046,23 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
 	/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
 	 * involving this register should also be added to WA batch as required.
 	 */
-	if (INTEL_REVID(dev) <= SKL_REVID_E0)
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_E0))
 		/* WaDisableLSQCROPERFforOCL:skl */
 		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
 			   GEN8_LQSC_RO_PERF_DIS);
 
 	/* WaEnableGapsTsvCreditFix:skl */
-	if (IS_SKYLAKE(dev) && (INTEL_REVID(dev) >= SKL_REVID_C0)) {
+	if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) {
 		I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
 					   GEN9_GAPS_TSV_CREDIT_DISABLE));
 	}
 
 	/* WaDisablePowerCompilerClockGating:skl */
-	if (INTEL_REVID(dev) == SKL_REVID_B0)
+	if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0))
 		WA_SET_BIT_MASKED(HIZ_CHICKEN,
 				  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
-	if (INTEL_REVID(dev) <= SKL_REVID_D0) {
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
 		/*
		 * Use Force Non-Coherent whenever executing a 3D context. This
		 * is a workaround for a possible hang in the unlikely event
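Most of the churn in these workaround hunks is mechanical: open-coded INTEL_REVID() comparisons become inclusive range checks, with 0 and REVID_FOREVER as the open ends. The shape of such a range macro is simple; a sketch under the assumption that revision IDs are small monotonically increasing integers (illustrative, the driver's exact definitions may differ):

    #define REVID_FOREVER   0xff

    /*
     * True when the device revision is inside [since, until], both ends
     * inclusive. IS_SKL_REVID(dev, 0, SKL_REVID_E0) therefore reads as
     * "any Skylake stepping up to and including E0".
     */
    #define IS_REVID(p, since, until) \
            (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))

The readability win is at the call sites: IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) pins exactly one stepping, and a range can no longer be accidentally half-open the way a bare "<" against the next stepping could be.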
@@ -1078,19 +1073,17 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
 			   HDC_FORCE_NON_COHERENT);
 	}
 
-	if (INTEL_REVID(dev) == SKL_REVID_C0 ||
-	    INTEL_REVID(dev) == SKL_REVID_D0)
-		/* WaBarrierPerformanceFixDisable:skl */
+	/* WaBarrierPerformanceFixDisable:skl */
+	if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
 		WA_SET_BIT_MASKED(HDC_CHICKEN0,
 				  HDC_FENCE_DEST_SLM_DISABLE |
 				  HDC_BARRIER_PERFORMANCE_DISABLE);
 
 	/* WaDisableSbeCacheDispatchPortSharing:skl */
-	if (INTEL_REVID(dev) <= SKL_REVID_F0) {
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_F0))
 		WA_SET_BIT_MASKED(
 			GEN7_HALF_SLICE_CHICKEN1,
 			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
-	}
 
 	return skl_tune_iz_hashing(ring);
 }
@@ -1107,11 +1100,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring)
 
 	/* WaStoreMultiplePTEenable:bxt */
 	/* This is a requirement according to Hardware specification */
-	if (INTEL_REVID(dev) == BXT_REVID_A0)
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
 		I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
 
 	/* WaSetClckGatingDisableMedia:bxt */
-	if (INTEL_REVID(dev) == BXT_REVID_A0) {
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
 		I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
 					    ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
 	}
@@ -1121,7 +1114,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring)
 			  STALL_DOP_GATING_DISABLE);
 
 	/* WaDisableSbeCacheDispatchPortSharing:bxt */
-	if (INTEL_REVID(dev) <= BXT_REVID_B0) {
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
 		WA_SET_BIT_MASKED(
 			GEN7_HALF_SLICE_CHICKEN1,
 			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
@@ -1319,11 +1312,13 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 		return ret;
 
 	for_each_ring(useless, dev_priv, i) {
-		u32 mbox_reg = signaller->semaphore.mbox.signal[i];
-		if (mbox_reg != GEN6_NOSYNC) {
+		i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[i];
+
+		if (i915_mmio_reg_valid(mbox_reg)) {
 			u32 seqno = i915_gem_request_get_seqno(signaller_req);
+
 			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
-			intel_ring_emit(signaller, mbox_reg);
+			intel_ring_emit_reg(signaller, mbox_reg);
 			intel_ring_emit(signaller, seqno);
 		}
 	}
@@ -2004,11 +1999,35 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
 
 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
-	iounmap(ringbuf->virtual_start);
+	if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
+		vunmap(ringbuf->virtual_start);
+	else
+		iounmap(ringbuf->virtual_start);
 	ringbuf->virtual_start = NULL;
 	i915_gem_object_ggtt_unpin(ringbuf->obj);
 }
 
+static u32 *vmap_obj(struct drm_i915_gem_object *obj)
+{
+	struct sg_page_iter sg_iter;
+	struct page **pages;
+	void *addr;
+	int i;
+
+	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
+	if (pages == NULL)
+		return NULL;
+
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
+		pages[i++] = sg_page_iter_page(&sg_iter);
+
+	addr = vmap(pages, i, 0, PAGE_KERNEL);
+	drm_free_large(pages);
+
+	return addr;
+}
+
 int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
				     struct intel_ringbuffer *ringbuf)
 {
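vmap_obj() above has to flatten the object's scatterlist into a plain struct page array only because vmap() takes pages, not scatterlists. The function that follows then chooses between two mapping strategies for the ringbuffer; a condensed sketch of that decision (names trimmed, the real code also pins the object and moves it to the right cache domain first):

    /*
     * Prefer a cacheable CPU mapping of the object's own pages when the
     * CPU and GPU share a last-level cache: snooping keeps it coherent,
     * and cached writes beat write-combined writes through the aperture.
     * Stolen memory has no struct pages, so it must keep the WC path.
     */
    if (HAS_LLC(dev_priv) && !obj->stolen)
            ringbuf->virtual_start = vmap_obj(obj);          /* cacheable */
    else
            ringbuf->virtual_start = ioremap_wc(/* GGTT aperture */ ...,
                                                ringbuf->size);

The teardown in intel_unpin_ringbuffer_obj() above mirrors the same test, since a vmap()ed pointer must be released with vunmap() and an ioremap_wc()ed one with iounmap().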
@@ -2016,21 +2035,39 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 	struct drm_i915_gem_object *obj = ringbuf->obj;
 	int ret;
 
-	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
-	if (ret)
-		return ret;
+	if (HAS_LLC(dev_priv) && !obj->stolen) {
+		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
+		if (ret)
+			return ret;
 
-	ret = i915_gem_object_set_to_gtt_domain(obj, true);
-	if (ret) {
-		i915_gem_object_ggtt_unpin(obj);
-		return ret;
-	}
+		ret = i915_gem_object_set_to_cpu_domain(obj, true);
+		if (ret) {
+			i915_gem_object_ggtt_unpin(obj);
+			return ret;
+		}
+
+		ringbuf->virtual_start = vmap_obj(obj);
+		if (ringbuf->virtual_start == NULL) {
+			i915_gem_object_ggtt_unpin(obj);
+			return -ENOMEM;
+		}
+	} else {
+		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+		if (ret)
+			return ret;
 
-	ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
-					    i915_gem_obj_ggtt_offset(obj), ringbuf->size);
-	if (ringbuf->virtual_start == NULL) {
-		i915_gem_object_ggtt_unpin(obj);
-		return -EINVAL;
+		ret = i915_gem_object_set_to_gtt_domain(obj, true);
+		if (ret) {
+			i915_gem_object_ggtt_unpin(obj);
+			return ret;
+		}
+
+		ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
+						    i915_gem_obj_ggtt_offset(obj), ringbuf->size);
+		if (ringbuf->virtual_start == NULL) {
+			i915_gem_object_ggtt_unpin(obj);
+			return -EINVAL;
+		}
 	}
 
 	return 0;
@@ -2070,10 +2107,14 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
 	int ret;
 
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
-	if (ring == NULL)
+	if (ring == NULL) {
+		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
+				 engine->name);
 		return ERR_PTR(-ENOMEM);
+	}
 
 	ring->ring = engine;
+	list_add(&ring->link, &engine->buffers);
 
 	ring->size = size;
 	/* Workaround an erratum on the i830 which causes a hang if
@@ -2089,8 +2130,9 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
 	ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
 	if (ret) {
-		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
-			  engine->name, ret);
+		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
+				 engine->name, ret);
+		list_del(&ring->link);
 		kfree(ring);
 		return ERR_PTR(ret);
 	}
@@ -2102,6 +2144,7 @@ void intel_ringbuffer_free(struct intel_ringbuffer *ring)
 {
 	intel_destroy_ringbuffer_obj(ring);
+	list_del(&ring->link);
 	kfree(ring);
 }
 
@@ -2117,6 +2160,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
 	INIT_LIST_HEAD(&ring->execlist_queue);
+	INIT_LIST_HEAD(&ring->buffers);
 	i915_gem_batch_pool_init(dev, &ring->batch_pool);
 	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 49fa41dc0eb6..5d1eb206151d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -100,6 +100,7 @@ struct intel_ringbuffer {
 	void __iomem *virtual_start;
 
 	struct intel_engine_cs *ring;
+	struct list_head link;
 
 	u32 head;
 	u32 tail;
@@ -157,6 +158,7 @@ struct intel_engine_cs {
 	u32 mmio_base;
 	struct drm_device *dev;
 	struct intel_ringbuffer *buffer;
+	struct list_head buffers;
 
 	/*
 	 * A pool of objects to use as shadow copies of client batch buffers
@@ -247,7 +249,7 @@ struct intel_engine_cs {
 			/* our mbox written by others */
 			u32 wait[I915_NUM_RINGS];
 			/* mboxes this ring signals to */
-			u32 signal[I915_NUM_RINGS];
+			i915_reg_t signal[I915_NUM_RINGS];
 		} mbox;
 		u64 signal_ggtt[I915_NUM_RINGS];
 	};
@@ -441,6 +443,11 @@ static inline void intel_ring_emit(struct intel_engine_cs *ring,
 	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
 	ringbuf->tail += 4;
 }
+static inline void intel_ring_emit_reg(struct intel_engine_cs *ring,
+				       i915_reg_t reg)
+{
+	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
+}
 static inline void intel_ring_advance(struct intel_engine_cs *ring)
 {
 	struct intel_ringbuffer *ringbuf = ring->buffer;
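This header hunk, together with the wa_add() and gen6_signal() changes above, leans on the i915_reg_t conversion running through the whole series: register offsets stop being bare u32 values and become a single-member struct, so the compiler rejects code that adds two registers, passes a value where an offset is expected, or compares a register against a magic number. The core of the technique is roughly the following (the driver's real definitions live in i915_reg.h and may differ in detail):

    typedef struct {
            uint32_t reg;   /* the only member: the MMIO offset */
    } i915_reg_t;

    #define _MMIO(r) ((const i915_reg_t){ .reg = (r) })

    static inline uint32_t i915_mmio_reg_offset(i915_reg_t r)
    {
            return r.reg;   /* unwrap only at the I915_READ/WRITE boundary */
    }

    static inline bool i915_mmio_reg_valid(i915_reg_t r)
    {
            return r.reg != 0;      /* offset 0 doubles as "no register" */
    }

intel_ring_emit_reg() then becomes the one sanctioned way to place a register offset into the command stream, which is why gen6_signal() switches to it and tests i915_mmio_reg_valid() instead of comparing against GEN6_NOSYNC.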
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index d89c1d0aa1b7..afca6c940b9a 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -49,21 +49,18 @@
  * present for a given platform.
  */
 
-#define GEN9_ENABLE_DC5(dev) 0
-#define SKL_ENABLE_DC6(dev) IS_SKYLAKE(dev)
-
 #define for_each_power_well(i, power_well, domain_mask, power_domains)	\
 	for (i = 0;							\
 	     i < (power_domains)->power_well_count &&			\
 		 ((power_well) = &(power_domains)->power_wells[i]);	\
 	     i++)							\
-		if ((power_well)->domains & (domain_mask))
+		for_each_if ((power_well)->domains & (domain_mask))
 
 #define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
 	for (i = (power_domains)->power_well_count - 1;			 \
 	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
 	     i--)							 \
-		if ((power_well)->domains & (domain_mask))
+		for_each_if ((power_well)->domains & (domain_mask))
 
 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
 				    int power_well_id);
@@ -244,12 +241,6 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
 			gen8_irq_power_well_post_enable(dev_priv,
 							1 << PIPE_C | 1 << PIPE_B);
 	}
-
-	if (power_well->data == SKL_DISP_PW_1) {
-		if (!dev_priv->power_domains.initializing)
-			intel_prepare_ddi(dev);
-		gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
-	}
 }
 
 static void hsw_set_power_well(struct drm_i915_private *dev_priv,
@@ -292,58 +283,38 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
 	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
 	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
 	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
 	BIT(POWER_DOMAIN_AUX_B) |			\
 	BIT(POWER_DOMAIN_AUX_C) |			\
 	BIT(POWER_DOMAIN_AUX_D) |			\
 	BIT(POWER_DOMAIN_AUDIO) |			\
 	BIT(POWER_DOMAIN_VGA) |				\
 	BIT(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
-	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
-	BIT(POWER_DOMAIN_PLLS) |			\
-	BIT(POWER_DOMAIN_PIPE_A) |			\
-	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
-	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
-	BIT(POWER_DOMAIN_AUX_A) |			\
-	BIT(POWER_DOMAIN_INIT))
 #define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (		\
-	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
 	BIT(POWER_DOMAIN_INIT))
 #define SKL_DISPLAY_DDI_B_POWER_DOMAINS (		\
-	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
 	BIT(POWER_DOMAIN_INIT))
 #define SKL_DISPLAY_DDI_C_POWER_DOMAINS (		\
-	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
 	BIT(POWER_DOMAIN_INIT))
 #define SKL_DISPLAY_DDI_D_POWER_DOMAINS (		\
-	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
 	BIT(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_MISC_IO_POWER_DOMAINS (		\
-	SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS |		\
-	BIT(POWER_DOMAIN_PLLS) |			\
+#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
+	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
+	BIT(POWER_DOMAIN_MODESET) |			\
+	BIT(POWER_DOMAIN_AUX_A) |			\
 	BIT(POWER_DOMAIN_INIT))
 #define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
-	(POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
+	(POWER_DOMAIN_MASK & ~(				\
 	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
-	SKL_DISPLAY_DDI_A_E_POWER_DOMAINS |		\
-	SKL_DISPLAY_DDI_B_POWER_DOMAINS |		\
-	SKL_DISPLAY_DDI_C_POWER_DOMAINS |		\
-	SKL_DISPLAY_DDI_D_POWER_DOMAINS |		\
-	SKL_DISPLAY_MISC_IO_POWER_DOMAINS)) |		\
+	SKL_DISPLAY_DC_OFF_POWER_DOMAINS)) |		\
 	BIT(POWER_DOMAIN_INIT))
 
 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
@@ -354,25 +325,28 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
 	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
 	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
 	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
 	BIT(POWER_DOMAIN_AUX_B) |			\
 	BIT(POWER_DOMAIN_AUX_C) |			\
 	BIT(POWER_DOMAIN_AUDIO) |			\
 	BIT(POWER_DOMAIN_VGA) |				\
+	BIT(POWER_DOMAIN_GMBUS) |			\
 	BIT(POWER_DOMAIN_INIT))
 #define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
 	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
 	BIT(POWER_DOMAIN_PIPE_A) |			\
 	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
 	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
 	BIT(POWER_DOMAIN_AUX_A) |			\
 	BIT(POWER_DOMAIN_PLLS) |			\
 	BIT(POWER_DOMAIN_INIT))
+#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
+	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
+	BIT(POWER_DOMAIN_MODESET) |			\
+	BIT(POWER_DOMAIN_AUX_A) |			\
+	BIT(POWER_DOMAIN_INIT))
 #define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
 	(POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
 	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) |	\
@@ -416,46 +390,74 @@ static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
 	  */
 }
 
-void bxt_enable_dc9(struct drm_i915_private *dev_priv)
+static void gen9_set_dc_state_debugmask_memory_up(
+			struct drm_i915_private *dev_priv)
 {
 	uint32_t val;
 
-	assert_can_enable_dc9(dev_priv);
+	/* The below bit doesn't need to be cleared ever afterwards */
+	val = I915_READ(DC_STATE_DEBUG);
+	if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
+		val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
+		I915_WRITE(DC_STATE_DEBUG, val);
+		POSTING_READ(DC_STATE_DEBUG);
+	}
+}
 
-	DRM_DEBUG_KMS("Enabling DC9\n");
+static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
+{
+	uint32_t val;
+	uint32_t mask;
+
+	mask = DC_STATE_EN_UPTO_DC5;
+	if (IS_BROXTON(dev_priv))
+		mask |= DC_STATE_EN_DC9;
+	else
+		mask |= DC_STATE_EN_UPTO_DC6;
+
+	WARN_ON_ONCE(state & ~mask);
+
+	if (i915.enable_dc == 0)
+		state = DC_STATE_DISABLE;
+	else if (i915.enable_dc == 1 && state > DC_STATE_EN_UPTO_DC5)
+		state = DC_STATE_EN_UPTO_DC5;
+
+	if (state & DC_STATE_EN_UPTO_DC5_DC6_MASK)
+		gen9_set_dc_state_debugmask_memory_up(dev_priv);
 
 	val = I915_READ(DC_STATE_EN);
-	val |= DC_STATE_EN_DC9;
+	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
+		      val & mask, state);
+	val &= ~mask;
+	val |= state;
 	I915_WRITE(DC_STATE_EN, val);
 	POSTING_READ(DC_STATE_EN);
 }
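gen9_set_dc_state() above becomes the single writer of DC_STATE_EN: it builds the platform's allowed-state mask (DC9 exists only on Broxton, DC6 only on the others), clamps the request against the i915.enable_dc module parameter, and performs one read-modify-write. The clamping is the non-obvious part; boiled down (an illustrative extraction mirroring the logic above, not a separate driver function):

    /* state is the requested DC_STATE_EN bitfield. */
    static uint32_t clamp_dc_state(uint32_t state, int enable_dc)
    {
            if (enable_dc == 0)
                    return DC_STATE_DISABLE;        /* no DC states at all */
            if (enable_dc == 1 && state > DC_STATE_EN_UPTO_DC5)
                    return DC_STATE_EN_UPTO_DC5;    /* allow DC5, cap DC6 */
            return state;
    }

Centralizing the write is also what lets the separate gen9_disable_dc5()/skl_disable_dc6() helpers further down collapse into plain gen9_set_dc_state(dev_priv, DC_STATE_DISABLE) calls.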
-void bxt_disable_dc9(struct drm_i915_private *dev_priv)
+void bxt_enable_dc9(struct drm_i915_private *dev_priv)
 {
-	uint32_t val;
+	assert_can_enable_dc9(dev_priv);
+
+	DRM_DEBUG_KMS("Enabling DC9\n");
+
+	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
+}
 
+void bxt_disable_dc9(struct drm_i915_private *dev_priv)
+{
 	assert_can_disable_dc9(dev_priv);
 
 	DRM_DEBUG_KMS("Disabling DC9\n");
 
-	val = I915_READ(DC_STATE_EN);
-	val &= ~DC_STATE_EN_DC9;
-	I915_WRITE(DC_STATE_EN, val);
-	POSTING_READ(DC_STATE_EN);
+	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 }
 
-static void gen9_set_dc_state_debugmask_memory_up(
-			struct drm_i915_private *dev_priv)
+static void assert_csr_loaded(struct drm_i915_private *dev_priv)
 {
-	uint32_t val;
-
-	/* The below bit doesn't need to be cleared ever afterwards */
-	val = I915_READ(DC_STATE_DEBUG);
-	if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
-		val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
-		I915_WRITE(DC_STATE_DEBUG, val);
-		POSTING_READ(DC_STATE_DEBUG);
-	}
+	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
+		  "CSR program storage start is NULL\n");
+	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
+	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
 }
 
 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
@@ -478,8 +480,6 @@ static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
 {
-	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
-					SKL_DISP_PW_2);
 	/*
 	 * During initialization, the firmware may not be loaded yet.
 	 * We still want to make sure that the DC enabling flag is cleared.
@@ -487,40 +487,17 @@ static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
 	if (dev_priv->power_domains.initializing)
 		return;
 
-	WARN_ONCE(!pg2_enabled, "PG2 not enabled to disable DC5.\n");
 	WARN_ONCE(dev_priv->pm.suspended,
 		"Disabling of DC5 while platform is runtime-suspended should never happen.\n");
 }
 
 static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
 {
-	uint32_t val;
-
 	assert_can_enable_dc5(dev_priv);
 
 	DRM_DEBUG_KMS("Enabling DC5\n");
 
-	gen9_set_dc_state_debugmask_memory_up(dev_priv);
-
-	val = I915_READ(DC_STATE_EN);
-	val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
-	val |= DC_STATE_EN_UPTO_DC5;
-	I915_WRITE(DC_STATE_EN, val);
-	POSTING_READ(DC_STATE_EN);
-}
-
-static void gen9_disable_dc5(struct drm_i915_private *dev_priv)
-{
-	uint32_t val;
-
-	assert_can_disable_dc5(dev_priv);
-
-	DRM_DEBUG_KMS("Disabling DC5\n");
-
-	val = I915_READ(DC_STATE_EN);
-	val &= ~DC_STATE_EN_UPTO_DC5;
-	I915_WRITE(DC_STATE_EN, val);
-	POSTING_READ(DC_STATE_EN);
+	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
 }
 
 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
@@ -546,40 +523,37 @@ static void assert_can_disable_dc6(struct drm_i915_private *dev_priv)
 	if (dev_priv->power_domains.initializing)
 		return;
 
-	assert_csr_loaded(dev_priv);
 	WARN_ONCE(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
 		  "DC6 already programmed to be disabled.\n");
 }
 
-static void skl_enable_dc6(struct drm_i915_private *dev_priv)
+static void gen9_disable_dc5_dc6(struct drm_i915_private *dev_priv)
 {
-	uint32_t val;
+	assert_can_disable_dc5(dev_priv);
+	if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
+		assert_can_disable_dc6(dev_priv);
+
+	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+}
+
+void skl_enable_dc6(struct drm_i915_private *dev_priv)
+{
 	assert_can_enable_dc6(dev_priv);
 
 	DRM_DEBUG_KMS("Enabling DC6\n");
 
-	gen9_set_dc_state_debugmask_memory_up(dev_priv);
+
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); - val = I915_READ(DC_STATE_EN); - val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK; - val |= DC_STATE_EN_UPTO_DC6; - I915_WRITE(DC_STATE_EN, val); - POSTING_READ(DC_STATE_EN); } -static void skl_disable_dc6(struct drm_i915_private *dev_priv) +void skl_disable_dc6(struct drm_i915_private *dev_priv) { - uint32_t val; - assert_can_disable_dc6(dev_priv); DRM_DEBUG_KMS("Disabling DC6\n"); - val = I915_READ(DC_STATE_EN); - val &= ~DC_STATE_EN_UPTO_DC6; - I915_WRITE(DC_STATE_EN, val); - POSTING_READ(DC_STATE_EN); + gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); } static void skl_set_power_well(struct drm_i915_private *dev_priv, @@ -629,20 +603,16 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv, !I915_READ(HSW_PWR_WELL_BIOS), "Invalid for power well status to be enabled, unless done by the BIOS, \ when request is to disable!\n"); - if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) && - power_well->data == SKL_DISP_PW_2) { - if (SKL_ENABLE_DC6(dev)) { - skl_disable_dc6(dev_priv); - /* - * DDI buffer programming unnecessary during driver-load/resume - * as it's already done during modeset initialization then. - * It's also invalid here as encoder list is still uninitialized. - */ - if (!dev_priv->power_domains.initializing) - intel_prepare_ddi(dev); - } else { - gen9_disable_dc5(dev_priv); - } + if (power_well->data == SKL_DISP_PW_2) { + /* + * DDI buffer programming unnecessary during + * driver-load/resume as it's already done + * during modeset initialization then. It's + * also invalid here as encoder list is still + * uninitialized. + */ + if (!dev_priv->power_domains.initializing) + intel_prepare_ddi(dev); } I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask); } @@ -657,34 +627,9 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv, } } else { if (enable_requested) { - if (IS_SKYLAKE(dev) && - (power_well->data == SKL_DISP_PW_1) && - (intel_csr_load_status_get(dev_priv) == FW_LOADED)) - DRM_DEBUG_KMS("Not Disabling PW1, dmc will handle\n"); - else { - I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask); - POSTING_READ(HSW_PWR_WELL_DRIVER); - DRM_DEBUG_KMS("Disabling %s\n", power_well->name); - } - - if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) && - power_well->data == SKL_DISP_PW_2) { - enum csr_state state; - /* TODO: wait for a completion event or - * similar here instead of busy - * waiting using wait_for function. 
- */ - wait_for((state = intel_csr_load_status_get(dev_priv)) != - FW_UNINITIALIZED, 1000); - if (state != FW_LOADED) - DRM_DEBUG("CSR firmware not ready (%d)\n", - state); - else - if (SKL_ENABLE_DC6(dev)) - skl_enable_dc6(dev_priv); - else - gen9_enable_dc5(dev_priv); - } + I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask); + POSTING_READ(HSW_PWR_WELL_DRIVER); + DRM_DEBUG_KMS("Disabling %s\n", power_well->name); } } @@ -759,6 +704,41 @@ static void skl_power_well_disable(struct drm_i915_private *dev_priv, skl_set_power_well(dev_priv, power_well, false); } +static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0; +} + +static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + gen9_disable_dc5_dc6(dev_priv); +} + +static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1) + skl_enable_dc6(dev_priv); + else + gen9_enable_dc5(dev_priv); +} + +static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + if (power_well->count > 0) { + gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + } else { + if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && + i915.enable_dc != 1) + gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); + else + gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5); + } +} + static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { @@ -973,10 +953,12 @@ static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_pr int power_well_id) { struct i915_power_domains *power_domains = &dev_priv->power_domains; - struct i915_power_well *power_well; int i; - for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) { + for (i = 0; i < power_domains->power_well_count; i++) { + struct i915_power_well *power_well; + + power_well = &power_domains->power_wells[i]; if (power_well->data == power_well_id) return power_well; } @@ -1457,7 +1439,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, for_each_power_well_rev(i, power_well, BIT(domain), power_domains) { WARN_ON(!power_well->count); - if (!--power_well->count && i915.disable_power_well) + if (!--power_well->count) intel_power_well_disable(dev_priv, power_well); } @@ -1469,20 +1451,17 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, #define HSW_ALWAYS_ON_POWER_DOMAINS ( \ BIT(POWER_DOMAIN_PIPE_A) | \ BIT(POWER_DOMAIN_TRANSCODER_EDP) | \ - BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \ BIT(POWER_DOMAIN_PORT_CRT) | \ BIT(POWER_DOMAIN_PLLS) | \ BIT(POWER_DOMAIN_AUX_A) | \ BIT(POWER_DOMAIN_AUX_B) | \ BIT(POWER_DOMAIN_AUX_C) | \ BIT(POWER_DOMAIN_AUX_D) | \ + BIT(POWER_DOMAIN_GMBUS) | \ BIT(POWER_DOMAIN_INIT)) #define HSW_DISPLAY_POWER_DOMAINS ( \ (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) 
| \ @@ -1499,49 +1478,42 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, #define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK #define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \ - BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ BIT(POWER_DOMAIN_PORT_CRT) | \ BIT(POWER_DOMAIN_AUX_B) | \ BIT(POWER_DOMAIN_AUX_C) | \ BIT(POWER_DOMAIN_INIT)) #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \ - BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ BIT(POWER_DOMAIN_AUX_B) | \ BIT(POWER_DOMAIN_INIT)) #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \ - BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ BIT(POWER_DOMAIN_AUX_B) | \ BIT(POWER_DOMAIN_INIT)) #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \ - BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ BIT(POWER_DOMAIN_AUX_C) | \ BIT(POWER_DOMAIN_INIT)) #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \ - BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ BIT(POWER_DOMAIN_AUX_C) | \ BIT(POWER_DOMAIN_INIT)) #define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \ - BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ BIT(POWER_DOMAIN_AUX_B) | \ BIT(POWER_DOMAIN_AUX_C) | \ BIT(POWER_DOMAIN_INIT)) #define CHV_DPIO_CMN_D_POWER_DOMAINS ( \ - BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \ BIT(POWER_DOMAIN_AUX_D) | \ BIT(POWER_DOMAIN_INIT)) @@ -1589,6 +1561,13 @@ static const struct i915_power_well_ops skl_power_well_ops = { .is_enabled = skl_power_well_enabled, }; +static const struct i915_power_well_ops gen9_dc_off_power_well_ops = { + .sync_hw = gen9_dc_off_power_well_sync_hw, + .enable = gen9_dc_off_power_well_enable, + .disable = gen9_dc_off_power_well_disable, + .is_enabled = gen9_dc_off_power_well_enabled, +}; + static struct i915_power_well hsw_power_wells[] = { { .name = "always-on", @@ -1644,6 +1623,7 @@ static struct i915_power_well vlv_power_wells[] = { .always_on = 1, .domains = VLV_ALWAYS_ON_POWER_DOMAINS, .ops = &i9xx_always_on_power_well_ops, + .data = PUNIT_POWER_WELL_ALWAYS_ON, }, { .name = "display", @@ -1745,20 +1725,29 @@ static struct i915_power_well skl_power_wells[] = { .always_on = 1, .domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS, .ops = &i9xx_always_on_power_well_ops, + .data = SKL_DISP_PW_ALWAYS_ON, }, { .name = "power well 1", - .domains = SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS, + /* Handled by the DMC firmware */ + .domains = 0, .ops = &skl_power_well_ops, .data = SKL_DISP_PW_1, }, { .name = "MISC IO power well", - .domains = SKL_DISPLAY_MISC_IO_POWER_DOMAINS, + /* Handled by the DMC firmware */ + .domains = 0, .ops = &skl_power_well_ops, .data = SKL_DISP_PW_MISC_IO, }, { + .name = "DC off", + .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS, + .ops = &gen9_dc_off_power_well_ops, + .data = SKL_DISP_PW_DC_OFF, + }, + { .name = "power well 2", .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS, .ops = &skl_power_well_ops, @@ -1790,6 +1779,34 @@ static struct 
i915_power_well skl_power_wells[] = { }, }; +void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv) +{ + struct i915_power_well *well; + + if (!IS_SKYLAKE(dev_priv)) + return; + + well = lookup_power_well(dev_priv, SKL_DISP_PW_1); + intel_power_well_enable(dev_priv, well); + + well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO); + intel_power_well_enable(dev_priv, well); +} + +void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv) +{ + struct i915_power_well *well; + + if (!IS_SKYLAKE(dev_priv)) + return; + + well = lookup_power_well(dev_priv, SKL_DISP_PW_1); + intel_power_well_disable(dev_priv, well); + + well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO); + intel_power_well_disable(dev_priv, well); +} + static struct i915_power_well bxt_power_wells[] = { { .name = "always-on", @@ -1804,11 +1821,17 @@ static struct i915_power_well bxt_power_wells[] = { .data = SKL_DISP_PW_1, }, { + .name = "DC off", + .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS, + .ops = &gen9_dc_off_power_well_ops, + .data = SKL_DISP_PW_DC_OFF, + }, + { .name = "power well 2", .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS, .ops = &skl_power_well_ops, .data = SKL_DISP_PW_2, - } + }, }; static int @@ -1845,6 +1868,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) i915.disable_power_well = sanitize_disable_power_well_option(dev_priv, i915.disable_power_well); + BUILD_BUG_ON(POWER_DOMAIN_NUM > 31); + mutex_init(&power_domains->lock); /* @@ -1855,7 +1880,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) set_power_wells(power_domains, hsw_power_wells); } else if (IS_BROADWELL(dev_priv->dev)) { set_power_wells(power_domains, bdw_power_wells); - } else if (IS_SKYLAKE(dev_priv->dev)) { + } else if (IS_SKYLAKE(dev_priv->dev) || IS_KABYLAKE(dev_priv->dev)) { set_power_wells(power_domains, skl_power_wells); } else if (IS_BROXTON(dev_priv->dev)) { set_power_wells(power_domains, bxt_power_wells); @@ -1870,21 +1895,6 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) return 0; } -static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv) -{ - struct drm_device *dev = dev_priv->dev; - struct device *device = &dev->pdev->dev; - - if (!HAS_RUNTIME_PM(dev)) - return; - - if (!intel_enable_rc6(dev)) - return; - - /* Make sure we're not suspended first. */ - pm_runtime_get_sync(device); -} - /** * intel_power_domains_fini - finalizes the power domain structures * @dev_priv: i915 device instance @@ -1895,15 +1905,17 @@ static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv) */ void intel_power_domains_fini(struct drm_i915_private *dev_priv) { - intel_runtime_pm_disable(dev_priv); - /* The i915.ko module is still not prepared to be loaded when * the power well is not enabled, so just enable it in case * we're going to unload/reload. */ intel_display_set_init_power(dev_priv, true); + + /* Remove the refcount we took to keep power well support disabled. 
*/ + if (!i915.disable_power_well) + intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); } -static void intel_power_domains_resume(struct drm_i915_private *dev_priv) +static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) { struct i915_power_domains *power_domains = &dev_priv->power_domains; struct i915_power_well *power_well; @@ -1918,6 +1930,47 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv) mutex_unlock(&power_domains->lock); } +static void skl_display_core_init(struct drm_i915_private *dev_priv, + bool resume) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + uint32_t val; + + gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + + /* enable PCH reset handshake */ + val = I915_READ(HSW_NDE_RSTWRN_OPT); + I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE); + + /* enable PG1 and Misc I/O */ + mutex_lock(&power_domains->lock); + skl_pw1_misc_io_init(dev_priv); + mutex_unlock(&power_domains->lock); + + if (!resume) + return; + + skl_init_cdclk(dev_priv); + + if (dev_priv->csr.dmc_payload) + intel_csr_load_program(dev_priv); +} + +static void skl_display_core_uninit(struct drm_i915_private *dev_priv) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + + gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + + skl_uninit_cdclk(dev_priv); + + /* The spec doesn't call for removing the reset handshake flag */ + /* disable PG1 and Misc I/O */ + mutex_lock(&power_domains->lock); + skl_pw1_misc_io_fini(dev_priv); + mutex_unlock(&power_domains->lock); +} + static void chv_phy_control_init(struct drm_i915_private *dev_priv) { struct i915_power_well *cmn_bc = @@ -2040,14 +2093,16 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) * This function initializes the hardware power domain state and enables all * power domains using intel_display_set_init_power(). */ -void intel_power_domains_init_hw(struct drm_i915_private *dev_priv) +void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) { struct drm_device *dev = dev_priv->dev; struct i915_power_domains *power_domains = &dev_priv->power_domains; power_domains->initializing = true; - if (IS_CHERRYVIEW(dev)) { + if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { + skl_display_core_init(dev_priv, resume); + } else if (IS_CHERRYVIEW(dev)) { mutex_lock(&power_domains->lock); chv_phy_control_init(dev_priv); mutex_unlock(&power_domains->lock); @@ -2059,38 +2114,31 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv) /* For now, we need the power well to be always enabled. */ intel_display_set_init_power(dev_priv, true); - intel_power_domains_resume(dev_priv); + /* Disable power support if the user asked so. */ + if (!i915.disable_power_well) + intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + intel_power_domains_sync_hw(dev_priv); power_domains->initializing = false; } /** - * intel_aux_display_runtime_get - grab an auxiliary power domain reference + * intel_power_domains_suspend - suspend power domain state * @dev_priv: i915 device instance * - * This function grabs a power domain reference for the auxiliary power domain - * (for access to the GMBUS and DP AUX blocks) and ensures that it and all its - * parents are powered up. Therefore users should only grab a reference to the - * innermost power domain they need. - * - * Any power domain reference obtained by this function must have a symmetric - * call to intel_aux_display_runtime_put() to release the reference again. 
+ * This function prepares the hardware power domain state before entering + * system suspend. It must be paired with intel_power_domains_init_hw(). */ -void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv) +void intel_power_domains_suspend(struct drm_i915_private *dev_priv) { - intel_runtime_pm_get(dev_priv); -} + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) + skl_display_core_uninit(dev_priv); -/** - * intel_aux_display_runtime_put - release an auxiliary power domain reference - * @dev_priv: i915 device instance - * - * This function drops the auxiliary power domain reference obtained by - * intel_aux_display_runtime_get() and might power down the corresponding - * hardware block right away if this is the last reference. - */ -void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv) -{ - intel_runtime_pm_put(dev_priv); + /* + * Even if power well support was disabled we still want to disable + * power wells while we are system suspended. + */ + if (!i915.disable_power_well) + intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); } /** diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index c42b636c2087..06679f164b3e 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -74,7 +74,7 @@ struct intel_sdvo { struct i2c_adapter ddc; /* Register for the SDVO device: SDVOB or SDVOC */ - uint32_t sdvo_reg; + i915_reg_t sdvo_reg; /* Active outputs controlled by this SDVO output */ uint16_t controlled_output; @@ -120,8 +120,7 @@ struct intel_sdvo { */ bool is_tv; - /* On different gens SDVOB is at different places. */ - bool is_sdvob; + enum port port; /* This is for current tv format name */ int tv_format_index; @@ -245,7 +244,7 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val) u32 bval = val, cval = val; int i; - if (intel_sdvo->sdvo_reg == PCH_SDVOB) { + if (HAS_PCH_SPLIT(dev_priv)) { I915_WRITE(intel_sdvo->sdvo_reg, val); POSTING_READ(intel_sdvo->sdvo_reg); /* @@ -259,7 +258,7 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val) return; } - if (intel_sdvo->sdvo_reg == GEN3_SDVOB) + if (intel_sdvo->port == PORT_B) cval = I915_READ(GEN3_SDVOC); else bval = I915_READ(GEN3_SDVOB); @@ -422,7 +421,7 @@ static const struct _sdvo_cmd_name { SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), }; -#define SDVO_NAME(svdo) ((svdo)->is_sdvob ? "SDVOB" : "SDVOC") +#define SDVO_NAME(svdo) ((svdo)->port == PORT_B ? "SDVOB" : "SDVOC") static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, const void *args, int args_len) @@ -1282,14 +1281,10 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder) sdvox |= SDVO_BORDER_ENABLE; } else { sdvox = I915_READ(intel_sdvo->sdvo_reg); - switch (intel_sdvo->sdvo_reg) { - case GEN3_SDVOB: + if (intel_sdvo->port == PORT_B) sdvox &= SDVOB_PRESERVE_MASK; - break; - case GEN3_SDVOC: + else sdvox &= SDVOC_PRESERVE_MASK; - break; - } sdvox |= (9 << 19) | SDVO_BORDER_ENABLE; } @@ -1464,12 +1459,23 @@ static void intel_disable_sdvo(struct intel_encoder *encoder) * matching DP port to be enabled on transcoder A. */ if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) { + /* + * We get CPU/PCH FIFO underruns on the other pipe when + * doing the workaround. Sweep them under the rug. 
+ */ + intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false); + intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); + temp &= ~SDVO_PIPE_B_SELECT; temp |= SDVO_ENABLE; intel_sdvo_write_sdvox(intel_sdvo, temp); temp &= ~SDVO_ENABLE; intel_sdvo_write_sdvox(intel_sdvo, temp); + + intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A); + intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); + intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); } } @@ -2251,7 +2257,7 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv, { struct sdvo_device_mapping *mapping; - if (sdvo->is_sdvob) + if (sdvo->port == PORT_B) mapping = &(dev_priv->sdvo_mappings[0]); else mapping = &(dev_priv->sdvo_mappings[1]); @@ -2269,7 +2275,7 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv, struct sdvo_device_mapping *mapping; u8 pin; - if (sdvo->is_sdvob) + if (sdvo->port == PORT_B) mapping = &dev_priv->sdvo_mappings[0]; else mapping = &dev_priv->sdvo_mappings[1]; @@ -2307,7 +2313,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo) struct drm_i915_private *dev_priv = dev->dev_private; struct sdvo_device_mapping *my_mapping, *other_mapping; - if (sdvo->is_sdvob) { + if (sdvo->port == PORT_B) { my_mapping = &dev_priv->sdvo_mappings[0]; other_mapping = &dev_priv->sdvo_mappings[1]; } else { @@ -2332,7 +2338,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo) /* No SDVO device info is found for another DVO port, * so use mapping assumption we had before BIOS parsing. */ - if (sdvo->is_sdvob) + if (sdvo->port == PORT_B) return 0x70; else return 0x72; @@ -2939,18 +2945,31 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo, return i2c_add_adapter(&sdvo->ddc) == 0; } -bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) +static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv, + enum port port) +{ + if (HAS_PCH_SPLIT(dev_priv)) + WARN_ON(port != PORT_B); + else + WARN_ON(port != PORT_B && port != PORT_C); +} + +bool intel_sdvo_init(struct drm_device *dev, + i915_reg_t sdvo_reg, enum port port) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_encoder *intel_encoder; struct intel_sdvo *intel_sdvo; int i; + + assert_sdvo_port_valid(dev_priv, port); + intel_sdvo = kzalloc(sizeof(*intel_sdvo), GFP_KERNEL); if (!intel_sdvo) return false; intel_sdvo->sdvo_reg = sdvo_reg; - intel_sdvo->is_sdvob = is_sdvob; + intel_sdvo->port = port; intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1; intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo); if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) @@ -3000,8 +3019,10 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) * hotplug lines. */ if (intel_sdvo->hotplug_active) { - intel_encoder->hpd_pin = - intel_sdvo->is_sdvob ? 
HPD_SDVO_B : HPD_SDVO_C; + if (intel_sdvo->port == PORT_B) + intel_encoder->hpd_pin = HPD_SDVO_B; + else + intel_encoder->hpd_pin = HPD_SDVO_C; } /* diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 56dc132e8e20..2b96f336589e 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -192,10 +192,9 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc, const int pipe = intel_plane->pipe; const int plane = intel_plane->plane + 1; u32 plane_ctl, stride_div, stride; - int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); const struct drm_intel_sprite_colorkey *key = &to_intel_plane_state(drm_plane->state)->ckey; - unsigned long surf_addr; + u32 surf_addr; u32 tile_height, plane_offset, plane_size; unsigned int rotation; int x_offset, y_offset; @@ -212,10 +211,6 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc, rotation = drm_plane->state->rotation; plane_ctl |= skl_plane_ctl_rotation(rotation); - intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h, - pixel_size, true, - src_w != crtc_w || src_h != crtc_h); - stride_div = intel_fb_stride_alignment(dev, fb->modifier[0], fb->pixel_format); @@ -297,8 +292,6 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc) I915_WRITE(PLANE_SURF(pipe, plane), 0); POSTING_READ(PLANE_SURF(pipe, plane)); - - intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false); } static void @@ -541,10 +534,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, if (IS_HASWELL(dev) || IS_BROADWELL(dev)) sprctl |= SPRITE_PIPE_CSC_ENABLE; - intel_update_sprite_watermarks(plane, crtc, src_w, src_h, pixel_size, - true, - src_w != crtc_w || src_h != crtc_h); - /* Sizes are 0 based */ src_w--; src_h--; @@ -678,10 +667,6 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, if (IS_GEN6(dev)) dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */ - intel_update_sprite_watermarks(plane, crtc, src_w, src_h, - pixel_size, true, - src_w != crtc_w || src_h != crtc_h); - /* Sizes are 0 based */ src_w--; src_h--; @@ -832,8 +817,8 @@ intel_check_sprite_plane(struct drm_plane *plane, hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale); if (hscale < 0) { DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n"); - drm_rect_debug_print(src, true); - drm_rect_debug_print(dst, false); + drm_rect_debug_print("src: ", src, true); + drm_rect_debug_print("dst: ", dst, false); return hscale; } @@ -841,8 +826,8 @@ intel_check_sprite_plane(struct drm_plane *plane, vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale); if (vscale < 0) { DRM_DEBUG_KMS("Vertical scaling factor out of limits\n"); - drm_rect_debug_print(src, true); - drm_rect_debug_print(dst, false); + drm_rect_debug_print("src: ", src, true); + drm_rect_debug_print("dst: ", dst, false); return vscale; } @@ -938,9 +923,6 @@ intel_commit_sprite_plane(struct drm_plane *plane, crtc = crtc ? 
crtc : plane->crtc; - if (!crtc->state->active) - return; - if (state->visible) { intel_plane->update_plane(plane, crtc, fb, state->dst.x1, state->dst.y1, diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 43cba129a0c0..c2358ba78b30 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -29,19 +29,7 @@ #define FORCEWAKE_ACK_TIMEOUT_MS 50 -#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__)) -#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__)) - -#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__)) -#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__)) - -#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__)) -#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__)) - -#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__)) -#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__)) - -#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__) +#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__)) static const char * const forcewake_domain_names[] = { "render", @@ -72,7 +60,7 @@ assert_device_not_suspended(struct drm_i915_private *dev_priv) static inline void fw_domain_reset(const struct intel_uncore_forcewake_domain *d) { - WARN_ON(d->reg_set == 0); + WARN_ON(!i915_mmio_reg_valid(d->reg_set)); __raw_i915_write32(d->i915, d->reg_set, d->val_reset); } @@ -118,7 +106,7 @@ static inline void fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d) { /* something from same cacheline, but not from the set register */ - if (d->reg_post) + if (i915_mmio_reg_valid(d->reg_post)) __raw_posting_read(d->i915, d->reg_post); } @@ -525,8 +513,7 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv) } /* We give fast paths for the really cool registers */ -#define NEEDS_FORCE_WAKE(reg) \ - ((reg) < 0x40000 && (reg) != FORCEWAKE) +#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000) #define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end)) @@ -589,7 +576,7 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv) REG_RANGE((reg), 0x9400, 0x9800) #define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \ - ((reg) < 0x40000 &&\ + ((reg) < 0x40000 && \ !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \ !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \ !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \ @@ -605,8 +592,8 @@ ilk_dummy_write(struct drm_i915_private *dev_priv) } static void -hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read, - bool before) +hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, + i915_reg_t reg, bool read, bool before) { const char *op = read ? "reading" : "writing to"; const char *when = before ? 
"before" : "after"; @@ -616,7 +603,7 @@ hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read, if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) { WARN(1, "Unclaimed register detected %s %s register 0x%x\n", - when, op, reg); + when, op, i915_mmio_reg_offset(reg)); __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); i915.mmio_debug--; /* Only report the first N failures */ } @@ -649,7 +636,7 @@ hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv) #define __gen2_read(x) \ static u##x \ -gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ +gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ GEN2_READ_HEADER(x); \ val = __raw_i915_read##x(dev_priv, reg); \ GEN2_READ_FOOTER; \ @@ -657,7 +644,7 @@ gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ #define __gen5_read(x) \ static u##x \ -gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ +gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ GEN2_READ_HEADER(x); \ ilk_dummy_write(dev_priv); \ val = __raw_i915_read##x(dev_priv, reg); \ @@ -680,6 +667,7 @@ __gen2_read(64) #undef GEN2_READ_HEADER #define GEN6_READ_HEADER(x) \ + u32 offset = i915_mmio_reg_offset(reg); \ unsigned long irqflags; \ u##x val = 0; \ assert_device_not_suspended(dev_priv); \ @@ -714,20 +702,12 @@ static inline void __force_wake_get(struct drm_i915_private *dev_priv, dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); } -#define __vgpu_read(x) \ -static u##x \ -vgpu_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ - GEN6_READ_HEADER(x); \ - val = __raw_i915_read##x(dev_priv, reg); \ - GEN6_READ_FOOTER; \ -} - #define __gen6_read(x) \ static u##x \ -gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ +gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ GEN6_READ_HEADER(x); \ hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \ - if (NEEDS_FORCE_WAKE(reg)) \ + if (NEEDS_FORCE_WAKE(offset)) \ __force_wake_get(dev_priv, FORCEWAKE_RENDER); \ val = __raw_i915_read##x(dev_priv, reg); \ hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \ @@ -736,47 +716,56 @@ gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ #define __vlv_read(x) \ static u##x \ -vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ +vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ + enum forcewake_domains fw_engine = 0; \ GEN6_READ_HEADER(x); \ - if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) \ - __force_wake_get(dev_priv, FORCEWAKE_RENDER); \ - else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) \ - __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \ + if (!NEEDS_FORCE_WAKE(offset)) \ + fw_engine = 0; \ + else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \ + fw_engine = FORCEWAKE_RENDER; \ + else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \ + fw_engine = FORCEWAKE_MEDIA; \ + if (fw_engine) \ + __force_wake_get(dev_priv, fw_engine); \ val = __raw_i915_read##x(dev_priv, reg); \ GEN6_READ_FOOTER; \ } #define __chv_read(x) \ static u##x \ -chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ +chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ + enum forcewake_domains fw_engine = 0; \ GEN6_READ_HEADER(x); \ - if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \ - __force_wake_get(dev_priv, FORCEWAKE_RENDER); \ - else if 
(FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \ - __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \ - else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \ - __force_wake_get(dev_priv, \ - FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \ + if (!NEEDS_FORCE_WAKE(offset)) \ + fw_engine = 0; \ + else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \ + fw_engine = FORCEWAKE_RENDER; \ + else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \ + fw_engine = FORCEWAKE_MEDIA; \ + else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \ + fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \ + if (fw_engine) \ + __force_wake_get(dev_priv, fw_engine); \ val = __raw_i915_read##x(dev_priv, reg); \ GEN6_READ_FOOTER; \ } #define SKL_NEEDS_FORCE_WAKE(reg) \ - ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg)) + ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg)) #define __gen9_read(x) \ static u##x \ -gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ +gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ enum forcewake_domains fw_engine; \ GEN6_READ_HEADER(x); \ hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \ - if (!SKL_NEEDS_FORCE_WAKE(reg)) \ + if (!SKL_NEEDS_FORCE_WAKE(offset)) \ fw_engine = 0; \ - else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \ + else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \ fw_engine = FORCEWAKE_RENDER; \ - else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \ + else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \ fw_engine = FORCEWAKE_MEDIA; \ - else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \ + else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \ fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \ else \ fw_engine = FORCEWAKE_BLITTER; \ @@ -787,10 +776,6 @@ gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ GEN6_READ_FOOTER; \ } -__vgpu_read(8) -__vgpu_read(16) -__vgpu_read(32) -__vgpu_read(64) __gen9_read(8) __gen9_read(16) __gen9_read(32) @@ -812,10 +797,37 @@ __gen6_read(64) #undef __chv_read #undef __vlv_read #undef __gen6_read -#undef __vgpu_read #undef GEN6_READ_FOOTER #undef GEN6_READ_HEADER +#define VGPU_READ_HEADER(x) \ + unsigned long irqflags; \ + u##x val = 0; \ + assert_device_not_suspended(dev_priv); \ + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) + +#define VGPU_READ_FOOTER \ + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ + trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ + return val + +#define __vgpu_read(x) \ +static u##x \ +vgpu_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ + VGPU_READ_HEADER(x); \ + val = __raw_i915_read##x(dev_priv, reg); \ + VGPU_READ_FOOTER; \ +} + +__vgpu_read(8) +__vgpu_read(16) +__vgpu_read(32) +__vgpu_read(64) + +#undef __vgpu_read +#undef VGPU_READ_FOOTER +#undef VGPU_READ_HEADER + #define GEN2_WRITE_HEADER \ trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ assert_device_not_suspended(dev_priv); \ @@ -824,7 +836,7 @@ __gen6_read(64) #define __gen2_write(x) \ static void \ -gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ +gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ GEN2_WRITE_HEADER; \ __raw_i915_write##x(dev_priv, reg, val); \ GEN2_WRITE_FOOTER; \ @@ -832,7 +844,7 @@ gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace #define __gen5_write(x) \ static void \ -gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ +gen5_write##x(struct 
drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ GEN2_WRITE_HEADER; \ ilk_dummy_write(dev_priv); \ __raw_i915_write##x(dev_priv, reg, val); \ @@ -855,6 +867,7 @@ __gen2_write(64) #undef GEN2_WRITE_HEADER #define GEN6_WRITE_HEADER \ + u32 offset = i915_mmio_reg_offset(reg); \ unsigned long irqflags; \ trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ assert_device_not_suspended(dev_priv); \ @@ -865,10 +878,10 @@ __gen2_write(64) #define __gen6_write(x) \ static void \ -gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ +gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ u32 __fifo_ret = 0; \ GEN6_WRITE_HEADER; \ - if (NEEDS_FORCE_WAKE(reg)) { \ + if (NEEDS_FORCE_WAKE(offset)) { \ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ } \ __raw_i915_write##x(dev_priv, reg, val); \ @@ -880,10 +893,10 @@ gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace #define __hsw_write(x) \ static void \ -hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ +hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ u32 __fifo_ret = 0; \ GEN6_WRITE_HEADER; \ - if (NEEDS_FORCE_WAKE(reg)) { \ + if (NEEDS_FORCE_WAKE(offset)) { \ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ } \ hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \ @@ -896,15 +909,7 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) GEN6_WRITE_FOOTER; \ } -#define __vgpu_write(x) \ -static void vgpu_write##x(struct drm_i915_private *dev_priv, \ - off_t reg, u##x val, bool trace) { \ - GEN6_WRITE_HEADER; \ - __raw_i915_write##x(dev_priv, reg, val); \ - GEN6_WRITE_FOOTER; \ -} - -static const u32 gen8_shadowed_regs[] = { +static const i915_reg_t gen8_shadowed_regs[] = { FORCEWAKE_MT, GEN6_RPNSWREQ, GEN6_RC_VIDEO_FREQ, @@ -915,11 +920,12 @@ static const u32 gen8_shadowed_regs[] = { /* TODO: Other registers are not yet used */ }; -static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg) +static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, + i915_reg_t reg) { int i; for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++) - if (reg == gen8_shadowed_regs[i]) + if (i915_mmio_reg_equal(reg, gen8_shadowed_regs[i])) return true; return false; @@ -927,10 +933,10 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg) #define __gen8_write(x) \ static void \ -gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ +gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ GEN6_WRITE_HEADER; \ hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \ - if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) \ + if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \ __force_wake_get(dev_priv, FORCEWAKE_RENDER); \ __raw_i915_write##x(dev_priv, reg, val); \ hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \ @@ -940,22 +946,25 @@ gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace #define __chv_write(x) \ static void \ -chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ - bool shadowed = is_gen8_shadowed(dev_priv, reg); \ +chv_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ + enum forcewake_domains fw_engine = 0; \ GEN6_WRITE_HEADER; \ - if (!shadowed) { \ - if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \ - 
__force_wake_get(dev_priv, FORCEWAKE_RENDER); \ - else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \ - __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \ - else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \ - __force_wake_get(dev_priv, FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \ - } \ + if (!NEEDS_FORCE_WAKE(offset) || \ + is_gen8_shadowed(dev_priv, reg)) \ + fw_engine = 0; \ + else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \ + fw_engine = FORCEWAKE_RENDER; \ + else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \ + fw_engine = FORCEWAKE_MEDIA; \ + else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \ + fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \ + if (fw_engine) \ + __force_wake_get(dev_priv, fw_engine); \ __raw_i915_write##x(dev_priv, reg, val); \ GEN6_WRITE_FOOTER; \ } -static const u32 gen9_shadowed_regs[] = { +static const i915_reg_t gen9_shadowed_regs[] = { RING_TAIL(RENDER_RING_BASE), RING_TAIL(GEN6_BSD_RING_BASE), RING_TAIL(VEBOX_RING_BASE), @@ -968,11 +977,12 @@ static const u32 gen9_shadowed_regs[] = { /* TODO: Other registers are not yet used */ }; -static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg) +static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, + i915_reg_t reg) { int i; for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++) - if (reg == gen9_shadowed_regs[i]) + if (i915_mmio_reg_equal(reg, gen9_shadowed_regs[i])) return true; return false; @@ -980,19 +990,19 @@ static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg) #define __gen9_write(x) \ static void \ -gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \ +gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \ bool trace) { \ enum forcewake_domains fw_engine; \ GEN6_WRITE_HEADER; \ hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \ - if (!SKL_NEEDS_FORCE_WAKE(reg) || \ + if (!SKL_NEEDS_FORCE_WAKE(offset) || \ is_gen9_shadowed(dev_priv, reg)) \ fw_engine = 0; \ - else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \ + else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \ fw_engine = FORCEWAKE_RENDER; \ - else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \ + else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \ fw_engine = FORCEWAKE_MEDIA; \ - else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \ + else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \ fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \ else \ fw_engine = FORCEWAKE_BLITTER; \ @@ -1024,20 +1034,41 @@ __gen6_write(8) __gen6_write(16) __gen6_write(32) __gen6_write(64) -__vgpu_write(8) -__vgpu_write(16) -__vgpu_write(32) -__vgpu_write(64) #undef __gen9_write #undef __chv_write #undef __gen8_write #undef __hsw_write #undef __gen6_write -#undef __vgpu_write #undef GEN6_WRITE_FOOTER #undef GEN6_WRITE_HEADER +#define VGPU_WRITE_HEADER \ + unsigned long irqflags; \ + trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ + assert_device_not_suspended(dev_priv); \ + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) + +#define VGPU_WRITE_FOOTER \ + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags) + +#define __vgpu_write(x) \ +static void vgpu_write##x(struct drm_i915_private *dev_priv, \ + i915_reg_t reg, u##x val, bool trace) { \ + VGPU_WRITE_HEADER; \ + __raw_i915_write##x(dev_priv, reg, val); \ + VGPU_WRITE_FOOTER; \ +} + +__vgpu_write(8) +__vgpu_write(16) +__vgpu_write(32) +__vgpu_write(64) + +#undef __vgpu_write +#undef VGPU_WRITE_FOOTER +#undef VGPU_WRITE_HEADER + #define ASSIGN_WRITE_MMIO_VFUNCS(x) \ do { \ dev_priv->uncore.funcs.mmio_writeb = x##_write8; \ 
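The intel_uncore.c hunks above are part of a tree-wide conversion from passing MMIO registers as bare u32/off_t offsets to the opaque i915_reg_t type, with i915_mmio_reg_offset(), i915_mmio_reg_equal() and i915_mmio_reg_valid() as the only ways in and out of the type. A minimal standalone sketch of this wrapper-struct pattern, simplified from the kernel header (the zero-offset sentinel used for INVALID_MMIO_REG below is an assumption, as is the example register offset), could look like:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wrapper struct: cannot be silently mixed up with a plain integer. */
typedef struct {
	uint32_t reg;
} i915_reg_t;

#define _MMIO(r) ((const i915_reg_t){ .reg = (r) })
#define INVALID_MMIO_REG _MMIO(0) /* assumed sentinel: offset 0 means "no register" */

static inline uint32_t i915_mmio_reg_offset(i915_reg_t reg)
{
	return reg.reg;
}

static inline bool i915_mmio_reg_equal(i915_reg_t a, i915_reg_t b)
{
	return i915_mmio_reg_offset(a) == i915_mmio_reg_offset(b);
}

static inline bool i915_mmio_reg_valid(i915_reg_t reg)
{
	return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG);
}

/* Force-wake range checks keep operating on the raw offset,
 * which is why the macros above extract "offset" once per access. */
#define NEEDS_FORCE_WAKE(offset) ((offset) < 0x40000)

int main(void)
{
	i915_reg_t reg = _MMIO(0x42300); /* hypothetical register offset */
	uint32_t offset = i915_mmio_reg_offset(reg);

	printf("offset=0x%x valid=%d needs_fw=%d\n",
	       (unsigned)offset, i915_mmio_reg_valid(reg),
	       NEEDS_FORCE_WAKE(offset));
	return 0;
}

Because the struct is passed by value and every helper is inline, the generated code matches the old bare-u32 version, but passing an ordinary integer where a register is expected, or comparing two registers with ==, now fails to compile; that is why the hunks above replace direct comparisons in is_gen8_shadowed()/is_gen9_shadowed() with i915_mmio_reg_equal() and the "reg_post != 0" test with i915_mmio_reg_valid().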
@@ -1057,7 +1088,8 @@ do { \ static void fw_domain_init(struct drm_i915_private *dev_priv, enum forcewake_domain_id domain_id, - u32 reg_set, u32 reg_ack) + i915_reg_t reg_set, + i915_reg_t reg_ack) { struct intel_uncore_forcewake_domain *d; @@ -1087,8 +1119,6 @@ static void fw_domain_init(struct drm_i915_private *dev_priv, d->reg_post = FORCEWAKE_ACK_VLV; else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) d->reg_post = ECOBUS; - else - d->reg_post = 0; d->i915 = dev_priv; d->id = domain_id; @@ -1262,12 +1292,14 @@ void intel_uncore_fini(struct drm_device *dev) #define GEN_RANGE(l, h) GENMASK(h, l) static const struct register_whitelist { - uint64_t offset; + i915_reg_t offset_ldw, offset_udw; uint32_t size; /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */ uint32_t gen_bitmask; } whitelist[] = { - { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) }, + { .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE), + .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE), + .size = 8, .gen_bitmask = GEN_RANGE(4, 9) }, }; int i915_reg_read_ioctl(struct drm_device *dev, @@ -1277,11 +1309,11 @@ int i915_reg_read_ioctl(struct drm_device *dev, struct drm_i915_reg_read *reg = data; struct register_whitelist const *entry = whitelist; unsigned size; - u64 offset; + i915_reg_t offset_ldw, offset_udw; int i, ret = 0; for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { - if (entry->offset == (reg->offset & -entry->size) && + if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) && (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) break; } @@ -1293,27 +1325,28 @@ int i915_reg_read_ioctl(struct drm_device *dev, * be naturally aligned (and those that are not so aligned merely * limit the available flags for that register). */ - offset = entry->offset; + offset_ldw = entry->offset_ldw; + offset_udw = entry->offset_udw; size = entry->size; - size |= reg->offset ^ offset; + size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw); intel_runtime_pm_get(dev_priv); switch (size) { case 8 | 1: - reg->val = I915_READ64_2x32(offset, offset+4); + reg->val = I915_READ64_2x32(offset_ldw, offset_udw); break; case 8: - reg->val = I915_READ64(offset); + reg->val = I915_READ64(offset_ldw); break; case 4: - reg->val = I915_READ(offset); + reg->val = I915_READ(offset_ldw); break; case 2: - reg->val = I915_READ16(offset); + reg->val = I915_READ16(offset_ldw); break; case 1: - reg->val = I915_READ8(offset); + reg->val = I915_READ8(offset_ldw); break; default: ret = -EINVAL; @@ -1470,7 +1503,7 @@ static int gen6_do_reset(struct drm_device *dev) } static int wait_for_register(struct drm_i915_private *dev_priv, - const u32 reg, + i915_reg_t reg, const u32 mask, const u32 value, const unsigned long timeout_ms) diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig index 2b81a417cf29..35ca4f007839 100644 --- a/drivers/gpu/drm/imx/Kconfig +++ b/drivers/gpu/drm/imx/Kconfig @@ -10,15 +10,6 @@ config DRM_IMX help enable i.MX graphics support -config DRM_IMX_FB_HELPER - tristate "provide legacy framebuffer /dev/fb0" - select DRM_KMS_CMA_HELPER - depends on DRM_IMX - help - The DRM framework can provide a legacy /dev/fb0 framebuffer - for your device. 
This is necessary to get a framebuffer console - and also for applications using the legacy framebuffer API - config DRM_IMX_PARALLEL_DISPLAY tristate "Support for parallel displays" select DRM_PANEL diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 64f16ea779ef..882cf3d4b7a8 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -49,8 +49,10 @@ struct imx_drm_crtc { struct imx_drm_crtc_helper_funcs imx_drm_helper_funcs; }; +#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION) static int legacyfb_depth = 16; module_param(legacyfb_depth, int, 0444); +#endif int imx_drm_crtc_id(struct imx_drm_crtc *crtc) { @@ -60,26 +62,19 @@ EXPORT_SYMBOL_GPL(imx_drm_crtc_id); static void imx_drm_driver_lastclose(struct drm_device *drm) { -#if IS_ENABLED(CONFIG_DRM_IMX_FB_HELPER) struct imx_drm_device *imxdrm = drm->dev_private; - if (imxdrm->fbhelper) - drm_fbdev_cma_restore_mode(imxdrm->fbhelper); -#endif + drm_fbdev_cma_restore_mode(imxdrm->fbhelper); } static int imx_drm_driver_unload(struct drm_device *drm) { -#if IS_ENABLED(CONFIG_DRM_IMX_FB_HELPER) struct imx_drm_device *imxdrm = drm->dev_private; -#endif drm_kms_helper_poll_fini(drm); -#if IS_ENABLED(CONFIG_DRM_IMX_FB_HELPER) if (imxdrm->fbhelper) drm_fbdev_cma_fini(imxdrm->fbhelper); -#endif component_unbind_all(drm->dev, drm); @@ -215,11 +210,9 @@ EXPORT_SYMBOL_GPL(imx_drm_encoder_destroy); static void imx_drm_output_poll_changed(struct drm_device *drm) { -#if IS_ENABLED(CONFIG_DRM_IMX_FB_HELPER) struct imx_drm_device *imxdrm = drm->dev_private; drm_fbdev_cma_hotplug_event(imxdrm->fbhelper); -#endif } static struct drm_mode_config_funcs imx_drm_mode_config_funcs = { @@ -308,7 +301,7 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags) * The fb helper takes copies of key hardware information, so the * crtcs/connectors/encoders must not change after this point. */ -#if IS_ENABLED(CONFIG_DRM_IMX_FB_HELPER) +#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION) if (legacyfb_depth != 16 && legacyfb_depth != 32) { dev_warn(drm->dev, "Invalid legacyfb_depth. 
Defaulting to 16bpp\n"); legacyfb_depth = 16; @@ -340,7 +333,7 @@ err_kms: * imx_drm_add_crtc - add a new crtc */ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc, - struct imx_drm_crtc **new_crtc, + struct imx_drm_crtc **new_crtc, struct drm_plane *primary_plane, const struct imx_drm_crtc_helper_funcs *imx_drm_helper_funcs, struct device_node *port) { @@ -379,7 +372,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc, drm_crtc_helper_add(crtc, imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs); - drm_crtc_init(drm, crtc, + drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL, imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs); return 0; diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h index 28e776d8d9d2..83284b4d4be1 100644 --- a/drivers/gpu/drm/imx/imx-drm.h +++ b/drivers/gpu/drm/imx/imx-drm.h @@ -9,6 +9,7 @@ struct drm_display_mode; struct drm_encoder; struct drm_fbdev_cma; struct drm_framebuffer; +struct drm_plane; struct imx_drm_crtc; struct platform_device; @@ -24,7 +25,7 @@ struct imx_drm_crtc_helper_funcs { }; int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc, - struct imx_drm_crtc **new_crtc, + struct imx_drm_crtc **new_crtc, struct drm_plane *primary_plane, const struct imx_drm_crtc_helper_funcs *imx_helper_funcs, struct device_node *port); int imx_drm_remove_crtc(struct imx_drm_crtc *); diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index e671ad369416..f9597146dc67 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c @@ -721,6 +721,7 @@ static const struct of_device_id imx_tve_dt_ids[] = { { .compatible = "fsl,imx53-tve", }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, imx_tve_dt_ids); static struct platform_driver imx_tve_driver = { .probe = imx_tve_probe, diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 7bc8301fafff..4ab841eebee1 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -212,7 +212,8 @@ static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc) spin_lock_irqsave(&drm->event_lock, flags); if (ipu_crtc->page_flip_event) - drm_send_vblank_event(drm, -1, ipu_crtc->page_flip_event); + drm_crtc_send_vblank_event(&ipu_crtc->base, + ipu_crtc->page_flip_event); ipu_crtc->page_flip_event = NULL; imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc); spin_unlock_irqrestore(&drm->event_lock, flags); @@ -349,7 +350,6 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc, struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); int dp = -EINVAL; int ret; - int id; ret = ipu_get_resources(ipu_crtc, pdata); if (ret) { @@ -358,18 +358,23 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc, return ret; } + if (pdata->dp >= 0) + dp = IPU_DP_FLOW_SYNC_BG; + ipu_crtc->plane[0] = ipu_plane_init(drm, ipu, pdata->dma[0], dp, 0, + DRM_PLANE_TYPE_PRIMARY); + if (IS_ERR(ipu_crtc->plane[0])) { + ret = PTR_ERR(ipu_crtc->plane[0]); + goto err_put_resources; + } + ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc, - &ipu_crtc_helper_funcs, ipu_crtc->dev->of_node); + &ipu_crtc->plane[0]->base, &ipu_crtc_helper_funcs, + ipu_crtc->dev->of_node); if (ret) { dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret); goto err_put_resources; } - if (pdata->dp >= 0) - dp = IPU_DP_FLOW_SYNC_BG; - id = imx_drm_crtc_id(ipu_crtc->imx_crtc); - ipu_crtc->plane[0] = ipu_plane_init(ipu_crtc->base.dev, ipu, - pdata->dma[0], dp, BIT(id), true); ret = 
ipu_plane_get_resources(ipu_crtc->plane[0]); if (ret) { dev_err(ipu_crtc->dev, "getting plane 0 resources failed with %d.\n", @@ -379,10 +384,10 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc, /* If this crtc is using the DP, add an overlay plane */ if (pdata->dp >= 0 && pdata->dma[1] > 0) { - ipu_crtc->plane[1] = ipu_plane_init(ipu_crtc->base.dev, ipu, - pdata->dma[1], - IPU_DP_FLOW_SYNC_FG, - BIT(id), false); + ipu_crtc->plane[1] = ipu_plane_init(drm, ipu, pdata->dma[1], + IPU_DP_FLOW_SYNC_FG, + drm_crtc_mask(&ipu_crtc->base), + DRM_PLANE_TYPE_OVERLAY); if (IS_ERR(ipu_crtc->plane[1])) ipu_crtc->plane[1] = NULL; } @@ -407,28 +412,6 @@ err_put_resources: return ret; } -static struct device_node *ipu_drm_get_port_by_id(struct device_node *parent, - int port_id) -{ - struct device_node *port; - int id, ret; - - port = of_get_child_by_name(parent, "port"); - while (port) { - ret = of_property_read_u32(port, "reg", &id); - if (!ret && id == port_id) - return port; - - do { - port = of_get_next_child(parent, port); - if (!port) - return NULL; - } while (of_node_cmp(port->name, "port")); - } - - return NULL; -} - static int ipu_drm_bind(struct device *dev, struct device *master, void *data) { struct ipu_client_platformdata *pdata = dev->platform_data; @@ -470,23 +453,11 @@ static const struct component_ops ipu_crtc_ops = { static int ipu_drm_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct ipu_client_platformdata *pdata = dev->platform_data; int ret; if (!dev->platform_data) return -EINVAL; - if (!dev->of_node) { - /* Associate crtc device with the corresponding DI port node */ - dev->of_node = ipu_drm_get_port_by_id(dev->parent->of_node, - pdata->di + 2); - if (!dev->of_node) { - dev_err(dev, "missing port@%d node in %s\n", - pdata->di + 2, dev->parent->of_node->full_name); - return -ENODEV; - } - } - ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); if (ret) return ret; diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index 575f4c84388f..e2ff410bab74 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -381,7 +381,7 @@ static struct drm_plane_funcs ipu_plane_funcs = { struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, int dma, int dp, unsigned int possible_crtcs, - bool priv) + enum drm_plane_type type) { struct ipu_plane *ipu_plane; int ret; @@ -399,10 +399,9 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, ipu_plane->dma = dma; ipu_plane->dp_flow = dp; - ret = drm_plane_init(dev, &ipu_plane->base, possible_crtcs, - &ipu_plane_funcs, ipu_plane_formats, - ARRAY_SIZE(ipu_plane_formats), - priv); + ret = drm_universal_plane_init(dev, &ipu_plane->base, possible_crtcs, + &ipu_plane_funcs, ipu_plane_formats, + ARRAY_SIZE(ipu_plane_formats), type); if (ret) { DRM_ERROR("failed to initialize plane\n"); kfree(ipu_plane); diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h index 9b5eff18f5b8..3a443b413c60 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.h +++ b/drivers/gpu/drm/imx/ipuv3-plane.h @@ -34,7 +34,7 @@ struct ipu_plane { struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, int dma, int dp, unsigned int possible_crtcs, - bool priv); + enum drm_plane_type type); /* Init IDMAC, DMFC, DP */ int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 
b4deb9cf9d71..2e9b9f1b5cd2 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -54,7 +54,11 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector) if (imxpd->panel && imxpd->panel->funcs && imxpd->panel->funcs->get_modes) { + struct drm_display_info *di = &connector->display_info; + num_modes = imxpd->panel->funcs->get_modes(imxpd->panel); + if (!imxpd->bus_format && di->num_bus_formats) + imxpd->bus_format = di->bus_formats[0]; if (num_modes > 0) return num_modes; } diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h index 912151c36d59..205b2801d3b8 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h @@ -252,7 +252,7 @@ void mgag200_fbdev_fini(struct mga_device *mdev); /* mgag200_main.c */ int mgag200_framebuffer_init(struct drm_device *dev, struct mga_framebuffer *mfb, - struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c index b35b5b2db4ec..d9b04b008feb 100644 --- a/drivers/gpu/drm/mgag200/mgag200_fb.c +++ b/drivers/gpu/drm/mgag200/mgag200_fb.c @@ -138,7 +138,7 @@ static struct fb_ops mgag200fb_ops = { }; static int mgag200fb_create_object(struct mga_fbdev *afbdev, - struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **gobj_p) { struct drm_device *dev = afbdev->helper.dev; diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c index b1a0f5656175..9147444d5bf2 100644 --- a/drivers/gpu/drm/mgag200/mgag200_main.c +++ b/drivers/gpu/drm/mgag200/mgag200_main.c @@ -29,7 +29,7 @@ static const struct drm_framebuffer_funcs mga_fb_funcs = { int mgag200_framebuffer_init(struct drm_device *dev, struct mga_framebuffer *gfb, - struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj) { int ret; @@ -47,7 +47,7 @@ int mgag200_framebuffer_init(struct drm_device *dev, static struct drm_framebuffer * mgag200_user_framebuffer_create(struct drm_device *dev, struct drm_file *filp, - struct drm_mode_fb_cmd2 *mode_cmd) + const struct drm_mode_fb_cmd2 *mode_cmd) { struct drm_gem_object *obj; struct mga_framebuffer *mga_fb; diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 3be7a56b14f1..9a713b7a009d 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -240,9 +240,9 @@ uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane); struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane); const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb); struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, - struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); + const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev, - struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd); + struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd); struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev); diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index 121713281417..a474d6cf5d9f 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c @@ -138,7 +138,7 @@ const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb) 
} struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev, - struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd) + struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd) { struct drm_gem_object *bos[4] = {0}; struct drm_framebuffer *fb; @@ -168,7 +168,7 @@ out_unref: } struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, - struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos) + const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos) { struct msm_drm_private *priv = dev->dev_private; struct msm_kms *kms = priv->kms; diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h index 28bc202f9753..40f845e31272 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h @@ -7,6 +7,7 @@ struct nvkm_instmem { const struct nvkm_instmem_func *func; struct nvkm_subdev subdev; + spinlock_t lock; struct list_head list; u32 reserved; diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 8b8332e46f24..d5e6938cc6bc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -367,6 +367,7 @@ static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios, return -ENODEV; } obj = (union acpi_object *)buffer.pointer; + len = min(len, (int)obj->buffer.length); memcpy(bios+offset, obj->buffer.pointer, len); kfree(buffer.pointer); return len; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index db6bc6760545..18676b8c1721 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -246,7 +246,7 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = { int nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nv_fb, - struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct nouveau_bo *nvbo) { struct nouveau_display *disp = nouveau_display(dev); @@ -272,7 +272,7 @@ nouveau_framebuffer_init(struct drm_device *dev, static struct drm_framebuffer * nouveau_user_framebuffer_create(struct drm_device *dev, struct drm_file *file_priv, - struct drm_mode_fb_cmd2 *mode_cmd) + const struct drm_mode_fb_cmd2 *mode_cmd) { struct nouveau_framebuffer *nouveau_fb; struct drm_gem_object *gem; @@ -829,7 +829,6 @@ nouveau_finish_page_flip(struct nouveau_channel *chan, struct drm_device *dev = drm->dev; struct nouveau_page_flip_state *s; unsigned long flags; - int crtcid = -1; spin_lock_irqsave(&dev->event_lock, flags); @@ -841,15 +840,19 @@ nouveau_finish_page_flip(struct nouveau_channel *chan, s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head); if (s->event) { - /* Vblank timestamps/counts are only correct on >= NV-50 */ - if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) - crtcid = s->crtc; + if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { + drm_arm_vblank_event(dev, s->crtc, s->event); + } else { + drm_send_vblank_event(dev, s->crtc, s->event); - drm_send_vblank_event(dev, crtcid, s->event); + /* Give up ownership of vblank for page-flipped crtc */ + drm_vblank_put(dev, s->crtc); + } + } + else { + /* Give up ownership of vblank for page-flipped crtc */ + drm_vblank_put(dev, s->crtc); } - - /* Give up ownership of vblank for page-flipped crtc */ - drm_vblank_put(dev, s->crtc); list_del(&s->head); if (ps) diff --git 
a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h index 856abe0f070d..5a57d8b472c4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.h +++ b/drivers/gpu/drm/nouveau/nouveau_display.h @@ -23,7 +23,7 @@ nouveau_framebuffer(struct drm_framebuffer *fb) } int nouveau_framebuffer_init(struct drm_device *, struct nouveau_framebuffer *, - struct drm_mode_fb_cmd2 *, struct nouveau_bo *); + const struct drm_mode_fb_cmd2 *, struct nouveau_bo *); struct nouveau_page_flip_state { struct list_head head; diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h index 3050042e6c6d..a02813e994ec 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.h +++ b/drivers/gpu/drm/nouveau/nouveau_drm.h @@ -39,6 +39,7 @@ #include <nvif/client.h> #include <nvif/device.h> +#include <nvif/ioctl.h> #include <drmP.h> @@ -65,9 +66,10 @@ struct nouveau_drm_tile { }; enum nouveau_drm_object_route { - NVDRM_OBJECT_NVIF = 0, + NVDRM_OBJECT_NVIF = NVIF_IOCTL_V0_OWNER_NVIF, NVDRM_OBJECT_USIF, NVDRM_OBJECT_ABI16, + NVDRM_OBJECT_ANY = NVIF_IOCTL_V0_OWNER_ANY, }; enum nouveau_drm_notify_route { diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h index 1e2e9e27a03b..ca77ad001978 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h @@ -34,7 +34,6 @@ struct nouveau_fbdev { struct drm_fb_helper helper; struct nouveau_framebuffer nouveau_fb; - struct list_head fbdev_list; struct drm_device *dev; unsigned int saved_flags; struct nvif_object surf2d; diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c index 89dc4ce63490..6ae1b3494bcd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_usif.c +++ b/drivers/gpu/drm/nouveau/nouveau_usif.c @@ -313,7 +313,10 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc) if (nvif_unpack(argv->v0, 0, 0, true)) { /* block access to objects not created via this interface */ owner = argv->v0.owner; - argv->v0.owner = NVDRM_OBJECT_USIF; + if (argv->v0.object == 0ULL) + argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */ + else + argv->v0.owner = NVDRM_OBJECT_USIF; } else goto done; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c index e3c783d0e2ab..caf22b589edc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c @@ -279,6 +279,12 @@ nvkm_device_pci_10de_0fe3[] = { }; static const struct nvkm_device_pci_vendor +nvkm_device_pci_10de_0fe4[] = { + { 0x144d, 0xc740, NULL, { .War00C800_0 = true } }, + {} +}; + +static const struct nvkm_device_pci_vendor nvkm_device_pci_10de_104b[] = { { 0x1043, 0x844c, "GeForce GT 625" }, { 0x1043, 0x846b, "GeForce GT 625" }, @@ -689,6 +695,12 @@ nvkm_device_pci_10de_1199[] = { }; static const struct nvkm_device_pci_vendor +nvkm_device_pci_10de_11e0[] = { + { 0x1558, 0x5106, NULL, { .War00C800_0 = true } }, + {} +}; + +static const struct nvkm_device_pci_vendor nvkm_device_pci_10de_11e3[] = { { 0x17aa, 0x3683, "GeForce GTX 760A" }, {} @@ -1370,7 +1382,7 @@ nvkm_device_pci_10de[] = { { 0x0fe1, "GeForce GT 730M" }, { 0x0fe2, "GeForce GT 745M" }, { 0x0fe3, "GeForce GT 745M", nvkm_device_pci_10de_0fe3 }, - { 0x0fe4, "GeForce GT 750M" }, + { 0x0fe4, "GeForce GT 750M", nvkm_device_pci_10de_0fe4 }, { 0x0fe9, "GeForce GT 750M" }, { 0x0fea, "GeForce GT 755M" }, { 0x0fec, "GeForce 710A" }, @@ -1485,7 +1497,7 @@ nvkm_device_pci_10de[] = { { 0x11c6, "GeForce GTX 
650 Ti" }, { 0x11c8, "GeForce GTX 650" }, { 0x11cb, "GeForce GT 740" }, - { 0x11e0, "GeForce GTX 770M" }, + { 0x11e0, "GeForce GTX 770M", nvkm_device_pci_10de_11e0 }, { 0x11e1, "GeForce GTX 765M" }, { 0x11e2, "GeForce GTX 765M" }, { 0x11e3, "GeForce GTX 760M", nvkm_device_pci_10de_11e3 }, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c index b5b875928aba..74de7a96c22a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c @@ -207,6 +207,8 @@ gf117_grctx_generate_attrib(struct gf100_grctx *info) const u32 b = beta * gr->ppc_tpc_nr[gpc][ppc]; const u32 t = timeslice_mode; const u32 o = PPC_UNIT(gpc, ppc, 0); + if (!(gr->ppc_mask[gpc] & (1 << ppc))) + continue; mmio_skip(info, o + 0xc0, (t << 28) | (b << 16) | ++bo); mmio_wr32(info, o + 0xc0, (t << 28) | (b << 16) | --bo); bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc]; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc index 194afe910d21..7dacb3cc0668 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc @@ -52,10 +52,12 @@ mmio_list_base: #endif #ifdef INCLUDE_CODE +#define gpc_addr(reg,addr) /* +*/ imm32(reg,addr) /* +*/ or reg NV_PGRAPH_GPCX_GPCCS_MMIO_CTRL_BASE_ENABLE #define gpc_wr32(addr,reg) /* +*/ gpc_addr($r14,addr) /* */ mov b32 $r15 reg /* -*/ imm32($r14, addr) /* -*/ or $r14 NV_PGRAPH_GPCX_GPCCS_MMIO_CTRL_BASE_ENABLE /* */ call(nv_wr32) // reports an exception to the host @@ -161,7 +163,7 @@ init: #if NV_PGRAPH_GPCX_UNK__SIZE > 0 // figure out which, and how many, UNKs are actually present - imm32($r14, 0x500c30) + gpc_addr($r14, 0x500c30) clear b32 $r2 clear b32 $r3 clear b32 $r4 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h index 64d07df4b8b1..bb820ff28621 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h @@ -314,7 +314,7 @@ uint32_t gf117_grgpc_code[] = { 0x03f01200, 0x0002d000, 0x17f104bd, - 0x10fe0542, + 0x10fe0545, 0x0007f100, 0x0003f007, 0xbd0000d0, @@ -338,184 +338,184 @@ uint32_t gf117_grgpc_code[] = { 0x02d00103, 0xf104bd00, 0xf00c30e7, - 0x24bd50e3, - 0x44bd34bd, -/* 0x0430: init_unk_loop */ - 0xb06821f4, - 0x0bf400f6, - 0x01f7f00f, - 0xfd04f2bb, - 0x30b6054f, -/* 0x0445: init_unk_next */ - 0x0120b601, - 0xb004e0b6, - 0x1bf40126, -/* 0x0451: init_unk_done */ - 0x070380e2, - 0xf1080480, - 0xf0010027, - 0x22cf0223, - 0x9534bd00, - 0x07f10825, - 0x03f0c000, - 0x0005d001, - 0x07f104bd, - 0x03f0c100, - 0x0005d001, - 0x0e9804bd, - 0x010f9800, - 0x015021f5, - 0xbb002fbb, - 0x0e98003f, - 0x020f9801, - 0x015021f5, - 0xfd050e98, - 0x2ebb00ef, - 0x003ebb00, - 0x98020e98, - 0x21f5030f, - 0x0e980150, - 0x00effd07, - 0xbb002ebb, - 0x35b6003e, - 0x0007f102, - 0x0103f0d3, - 0xbd0003d0, - 0x0825b604, - 0xb60635b6, - 0x30b60120, - 0x0824b601, - 0xb90834b6, - 0x21f5022f, - 0x2fbb02d3, - 0x003fbb00, - 0x010007f1, - 0xd00203f0, + 0xe5f050e3, + 0xbd24bd01, +/* 0x0433: init_unk_loop */ + 0xf444bd34, + 0xf6b06821, + 0x0f0bf400, + 0xbb01f7f0, + 0x4ffd04f2, + 0x0130b605, +/* 0x0448: init_unk_next */ + 0xb60120b6, + 0x26b004e0, + 0xe21bf401, +/* 0x0454: init_unk_done */ + 0x80070380, + 0x27f10804, + 0x23f00100, + 0x0022cf02, + 0x259534bd, + 0x0007f108, + 0x0103f0c0, + 0xbd0005d0, + 0x0007f104, + 0x0103f0c1, + 
0xbd0005d0, + 0x000e9804, + 0xf5010f98, + 0xbb015021, + 0x3fbb002f, + 0x010e9800, + 0xf5020f98, + 0x98015021, + 0xeffd050e, + 0x002ebb00, + 0x98003ebb, + 0x0f98020e, + 0x5021f503, + 0x070e9801, + 0xbb00effd, + 0x3ebb002e, + 0x0235b600, + 0xd30007f1, + 0xd00103f0, 0x04bd0003, - 0x29f024bd, - 0x0007f11f, - 0x0203f008, - 0xbd0002d0, -/* 0x0505: main */ - 0x0031f404, - 0xf00028f4, - 0x21f424d7, - 0xf401f439, - 0xf404e4b0, - 0x81fe1e18, - 0x0627f001, - 0x12fd20bd, - 0x01e4b604, - 0xfe051efd, - 0x21f50018, - 0x0ef405fa, -/* 0x0535: main_not_ctx_xfer */ - 0x10ef94d3, - 0xf501f5f0, - 0xf4037e21, -/* 0x0542: ih */ - 0x80f9c60e, - 0xf90188fe, - 0xf990f980, - 0xf9b0f9a0, - 0xf9e0f9d0, - 0xf104bdf0, - 0xf00200a7, - 0xaacf00a3, - 0x04abc400, - 0xf02c0bf4, - 0xe7f124d7, - 0xe3f01a00, - 0x00eecf00, - 0x1900f7f1, - 0xcf00f3f0, - 0x21f400ff, - 0x01e7f004, - 0x1d0007f1, - 0xd00003f0, - 0x04bd000e, -/* 0x0590: ih_no_fifo */ - 0x010007f1, - 0xd00003f0, - 0x04bd000a, - 0xe0fcf0fc, - 0xb0fcd0fc, - 0x90fca0fc, - 0x88fe80fc, - 0xf480fc00, - 0x01f80032, -/* 0x05b4: hub_barrier_done */ - 0x9801f7f0, - 0xfebb040e, - 0x02ffb904, - 0x9418e7f1, - 0xf440e3f0, - 0x00f89d21, -/* 0x05cc: ctx_redswitch */ - 0xf120f7f0, + 0xb60825b6, + 0x20b60635, + 0x0130b601, + 0xb60824b6, + 0x2fb90834, + 0xd321f502, + 0x002fbb02, + 0xf1003fbb, + 0xf0010007, + 0x03d00203, + 0xbd04bd00, + 0x1f29f024, + 0x080007f1, + 0xd00203f0, + 0x04bd0002, +/* 0x0508: main */ + 0xf40031f4, + 0xd7f00028, + 0x3921f424, + 0xb0f401f4, + 0x18f404e4, + 0x0181fe1e, + 0xbd0627f0, + 0x0412fd20, + 0xfd01e4b6, + 0x18fe051e, + 0xfd21f500, + 0xd30ef405, +/* 0x0538: main_not_ctx_xfer */ + 0xf010ef94, + 0x21f501f5, + 0x0ef4037e, +/* 0x0545: ih */ + 0xfe80f9c6, + 0x80f90188, + 0xa0f990f9, + 0xd0f9b0f9, + 0xf0f9e0f9, + 0xa7f104bd, + 0xa3f00200, + 0x00aacf00, + 0xf404abc4, + 0xd7f02c0b, + 0x00e7f124, + 0x00e3f01a, + 0xf100eecf, + 0xf01900f7, + 0xffcf00f3, + 0x0421f400, + 0xf101e7f0, + 0xf01d0007, + 0x0ed00003, +/* 0x0593: ih_no_fifo */ + 0xf104bd00, + 0xf0010007, + 0x0ad00003, + 0xfc04bd00, + 0xfce0fcf0, + 0xfcb0fcd0, + 0xfc90fca0, + 0x0088fe80, + 0x32f480fc, +/* 0x05b7: hub_barrier_done */ + 0xf001f800, + 0x0e9801f7, + 0x04febb04, + 0xf102ffb9, + 0xf09418e7, + 0x21f440e3, +/* 0x05cf: ctx_redswitch */ + 0xf000f89d, + 0x07f120f7, + 0x03f08500, + 0x000fd001, + 0xe7f004bd, +/* 0x05e1: ctx_redswitch_delay */ + 0x01e2b608, + 0xf1fd1bf4, + 0xf10800f5, + 0xf10200f5, 0xf0850007, 0x0fd00103, - 0xf004bd00, -/* 0x05de: ctx_redswitch_delay */ - 0xe2b608e7, - 0xfd1bf401, - 0x0800f5f1, - 0x0200f5f1, - 0x850007f1, - 0xd00103f0, - 0x04bd000f, -/* 0x05fa: ctx_xfer */ - 0x07f100f8, - 0x03f08100, - 0x000fd002, - 0x11f404bd, - 0xcc21f507, -/* 0x060d: ctx_xfer_not_load */ - 0x6a21f505, - 0xf124bd02, - 0xf047fc07, - 0x02d00203, - 0xf004bd00, - 0x20b6012c, - 0xfc07f103, - 0x0203f04a, - 0xbd0002d0, - 0x01acf004, - 0xf102a5f0, - 0xf00000b7, - 0x0c9850b3, - 0x0fc4b604, - 0x9800bcbb, - 0x0d98000c, - 0x00e7f001, - 0x016f21f5, - 0xf101acf0, - 0xf04000b7, - 0x0c9850b3, - 0x0fc4b604, - 0x9800bcbb, - 0x0d98010c, - 0x060f9802, - 0x0800e7f1, - 0x016f21f5, + 0xf804bd00, +/* 0x05fd: ctx_xfer */ + 0x0007f100, + 0x0203f081, + 0xbd000fd0, + 0x0711f404, + 0x05cf21f5, +/* 0x0610: ctx_xfer_not_load */ + 0x026a21f5, + 0x07f124bd, + 0x03f047fc, + 0x0002d002, + 0x2cf004bd, + 0x0320b601, + 0x4afc07f1, + 0xd00203f0, + 0x04bd0002, 0xf001acf0, - 0xb7f104a5, - 0xb3f03000, + 0xb7f102a5, + 0xb3f00000, 0x040c9850, 0xbb0fc4b6, 0x0c9800bc, - 0x030d9802, - 0xf1080f98, - 0xf50200e7, - 0xf5016f21, - 0xf4025e21, - 0x12f40601, -/* 
0x06a9: ctx_xfer_post */ - 0x7f21f507, -/* 0x06ad: ctx_xfer_done */ - 0xb421f502, - 0x0000f805, - 0x00000000, + 0x010d9800, + 0xf500e7f0, + 0xf0016f21, + 0xb7f101ac, + 0xb3f04000, + 0x040c9850, + 0xbb0fc4b6, + 0x0c9800bc, + 0x020d9801, + 0xf1060f98, + 0xf50800e7, + 0xf0016f21, + 0xa5f001ac, + 0x00b7f104, + 0x50b3f030, + 0xb6040c98, + 0xbcbb0fc4, + 0x020c9800, + 0x98030d98, + 0xe7f1080f, + 0x21f50200, + 0x21f5016f, + 0x01f4025e, + 0x0712f406, +/* 0x06ac: ctx_xfer_post */ + 0x027f21f5, +/* 0x06b0: ctx_xfer_done */ + 0x05b721f5, + 0x000000f8, 0x00000000, 0x00000000, 0x00000000, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h index 2f596433c222..911976d20940 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h @@ -314,7 +314,7 @@ uint32_t gk104_grgpc_code[] = { 0x03f01200, 0x0002d000, 0x17f104bd, - 0x10fe0542, + 0x10fe0545, 0x0007f100, 0x0003f007, 0xbd0000d0, @@ -338,184 +338,184 @@ uint32_t gk104_grgpc_code[] = { 0x02d00103, 0xf104bd00, 0xf00c30e7, - 0x24bd50e3, - 0x44bd34bd, -/* 0x0430: init_unk_loop */ - 0xb06821f4, - 0x0bf400f6, - 0x01f7f00f, - 0xfd04f2bb, - 0x30b6054f, -/* 0x0445: init_unk_next */ - 0x0120b601, - 0xb004e0b6, - 0x1bf40126, -/* 0x0451: init_unk_done */ - 0x070380e2, - 0xf1080480, - 0xf0010027, - 0x22cf0223, - 0x9534bd00, - 0x07f10825, - 0x03f0c000, - 0x0005d001, - 0x07f104bd, - 0x03f0c100, - 0x0005d001, - 0x0e9804bd, - 0x010f9800, - 0x015021f5, - 0xbb002fbb, - 0x0e98003f, - 0x020f9801, - 0x015021f5, - 0xfd050e98, - 0x2ebb00ef, - 0x003ebb00, - 0x98020e98, - 0x21f5030f, - 0x0e980150, - 0x00effd07, - 0xbb002ebb, - 0x35b6003e, - 0x0007f102, - 0x0103f0d3, - 0xbd0003d0, - 0x0825b604, - 0xb60635b6, - 0x30b60120, - 0x0824b601, - 0xb90834b6, - 0x21f5022f, - 0x2fbb02d3, - 0x003fbb00, - 0x010007f1, - 0xd00203f0, + 0xe5f050e3, + 0xbd24bd01, +/* 0x0433: init_unk_loop */ + 0xf444bd34, + 0xf6b06821, + 0x0f0bf400, + 0xbb01f7f0, + 0x4ffd04f2, + 0x0130b605, +/* 0x0448: init_unk_next */ + 0xb60120b6, + 0x26b004e0, + 0xe21bf401, +/* 0x0454: init_unk_done */ + 0x80070380, + 0x27f10804, + 0x23f00100, + 0x0022cf02, + 0x259534bd, + 0x0007f108, + 0x0103f0c0, + 0xbd0005d0, + 0x0007f104, + 0x0103f0c1, + 0xbd0005d0, + 0x000e9804, + 0xf5010f98, + 0xbb015021, + 0x3fbb002f, + 0x010e9800, + 0xf5020f98, + 0x98015021, + 0xeffd050e, + 0x002ebb00, + 0x98003ebb, + 0x0f98020e, + 0x5021f503, + 0x070e9801, + 0xbb00effd, + 0x3ebb002e, + 0x0235b600, + 0xd30007f1, + 0xd00103f0, 0x04bd0003, - 0x29f024bd, - 0x0007f11f, - 0x0203f008, - 0xbd0002d0, -/* 0x0505: main */ - 0x0031f404, - 0xf00028f4, - 0x21f424d7, - 0xf401f439, - 0xf404e4b0, - 0x81fe1e18, - 0x0627f001, - 0x12fd20bd, - 0x01e4b604, - 0xfe051efd, - 0x21f50018, - 0x0ef405fa, -/* 0x0535: main_not_ctx_xfer */ - 0x10ef94d3, - 0xf501f5f0, - 0xf4037e21, -/* 0x0542: ih */ - 0x80f9c60e, - 0xf90188fe, - 0xf990f980, - 0xf9b0f9a0, - 0xf9e0f9d0, - 0xf104bdf0, - 0xf00200a7, - 0xaacf00a3, - 0x04abc400, - 0xf02c0bf4, - 0xe7f124d7, - 0xe3f01a00, - 0x00eecf00, - 0x1900f7f1, - 0xcf00f3f0, - 0x21f400ff, - 0x01e7f004, - 0x1d0007f1, - 0xd00003f0, - 0x04bd000e, -/* 0x0590: ih_no_fifo */ - 0x010007f1, - 0xd00003f0, - 0x04bd000a, - 0xe0fcf0fc, - 0xb0fcd0fc, - 0x90fca0fc, - 0x88fe80fc, - 0xf480fc00, - 0x01f80032, -/* 0x05b4: hub_barrier_done */ - 0x9801f7f0, - 0xfebb040e, - 0x02ffb904, - 0x9418e7f1, - 0xf440e3f0, - 0x00f89d21, -/* 0x05cc: ctx_redswitch */ - 0xf120f7f0, + 0xb60825b6, + 0x20b60635, + 0x0130b601, + 
0xb60824b6, + 0x2fb90834, + 0xd321f502, + 0x002fbb02, + 0xf1003fbb, + 0xf0010007, + 0x03d00203, + 0xbd04bd00, + 0x1f29f024, + 0x080007f1, + 0xd00203f0, + 0x04bd0002, +/* 0x0508: main */ + 0xf40031f4, + 0xd7f00028, + 0x3921f424, + 0xb0f401f4, + 0x18f404e4, + 0x0181fe1e, + 0xbd0627f0, + 0x0412fd20, + 0xfd01e4b6, + 0x18fe051e, + 0xfd21f500, + 0xd30ef405, +/* 0x0538: main_not_ctx_xfer */ + 0xf010ef94, + 0x21f501f5, + 0x0ef4037e, +/* 0x0545: ih */ + 0xfe80f9c6, + 0x80f90188, + 0xa0f990f9, + 0xd0f9b0f9, + 0xf0f9e0f9, + 0xa7f104bd, + 0xa3f00200, + 0x00aacf00, + 0xf404abc4, + 0xd7f02c0b, + 0x00e7f124, + 0x00e3f01a, + 0xf100eecf, + 0xf01900f7, + 0xffcf00f3, + 0x0421f400, + 0xf101e7f0, + 0xf01d0007, + 0x0ed00003, +/* 0x0593: ih_no_fifo */ + 0xf104bd00, + 0xf0010007, + 0x0ad00003, + 0xfc04bd00, + 0xfce0fcf0, + 0xfcb0fcd0, + 0xfc90fca0, + 0x0088fe80, + 0x32f480fc, +/* 0x05b7: hub_barrier_done */ + 0xf001f800, + 0x0e9801f7, + 0x04febb04, + 0xf102ffb9, + 0xf09418e7, + 0x21f440e3, +/* 0x05cf: ctx_redswitch */ + 0xf000f89d, + 0x07f120f7, + 0x03f08500, + 0x000fd001, + 0xe7f004bd, +/* 0x05e1: ctx_redswitch_delay */ + 0x01e2b608, + 0xf1fd1bf4, + 0xf10800f5, + 0xf10200f5, 0xf0850007, 0x0fd00103, - 0xf004bd00, -/* 0x05de: ctx_redswitch_delay */ - 0xe2b608e7, - 0xfd1bf401, - 0x0800f5f1, - 0x0200f5f1, - 0x850007f1, - 0xd00103f0, - 0x04bd000f, -/* 0x05fa: ctx_xfer */ - 0x07f100f8, - 0x03f08100, - 0x000fd002, - 0x11f404bd, - 0xcc21f507, -/* 0x060d: ctx_xfer_not_load */ - 0x6a21f505, - 0xf124bd02, - 0xf047fc07, - 0x02d00203, - 0xf004bd00, - 0x20b6012c, - 0xfc07f103, - 0x0203f04a, - 0xbd0002d0, - 0x01acf004, - 0xf102a5f0, - 0xf00000b7, - 0x0c9850b3, - 0x0fc4b604, - 0x9800bcbb, - 0x0d98000c, - 0x00e7f001, - 0x016f21f5, - 0xf101acf0, - 0xf04000b7, - 0x0c9850b3, - 0x0fc4b604, - 0x9800bcbb, - 0x0d98010c, - 0x060f9802, - 0x0800e7f1, - 0x016f21f5, + 0xf804bd00, +/* 0x05fd: ctx_xfer */ + 0x0007f100, + 0x0203f081, + 0xbd000fd0, + 0x0711f404, + 0x05cf21f5, +/* 0x0610: ctx_xfer_not_load */ + 0x026a21f5, + 0x07f124bd, + 0x03f047fc, + 0x0002d002, + 0x2cf004bd, + 0x0320b601, + 0x4afc07f1, + 0xd00203f0, + 0x04bd0002, 0xf001acf0, - 0xb7f104a5, - 0xb3f03000, + 0xb7f102a5, + 0xb3f00000, 0x040c9850, 0xbb0fc4b6, 0x0c9800bc, - 0x030d9802, - 0xf1080f98, - 0xf50200e7, - 0xf5016f21, - 0xf4025e21, - 0x12f40601, -/* 0x06a9: ctx_xfer_post */ - 0x7f21f507, -/* 0x06ad: ctx_xfer_done */ - 0xb421f502, - 0x0000f805, - 0x00000000, + 0x010d9800, + 0xf500e7f0, + 0xf0016f21, + 0xb7f101ac, + 0xb3f04000, + 0x040c9850, + 0xbb0fc4b6, + 0x0c9800bc, + 0x020d9801, + 0xf1060f98, + 0xf50800e7, + 0xf0016f21, + 0xa5f001ac, + 0x00b7f104, + 0x50b3f030, + 0xb6040c98, + 0xbcbb0fc4, + 0x020c9800, + 0x98030d98, + 0xe7f1080f, + 0x21f50200, + 0x21f5016f, + 0x01f4025e, + 0x0712f406, +/* 0x06ac: ctx_xfer_post */ + 0x027f21f5, +/* 0x06b0: ctx_xfer_done */ + 0x05b721f5, + 0x000000f8, 0x00000000, 0x00000000, 0x00000000, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h index ee8e54db8fc9..1c6e11b05df2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h @@ -314,7 +314,7 @@ uint32_t gk110_grgpc_code[] = { 0x03f01200, 0x0002d000, 0x17f104bd, - 0x10fe0542, + 0x10fe0545, 0x0007f100, 0x0003f007, 0xbd0000d0, @@ -338,184 +338,184 @@ uint32_t gk110_grgpc_code[] = { 0x02d00103, 0xf104bd00, 0xf00c30e7, - 0x24bd50e3, - 0x44bd34bd, -/* 0x0430: init_unk_loop */ - 0xb06821f4, - 0x0bf400f6, - 0x01f7f00f, - 0xfd04f2bb, - 
0x30b6054f, -/* 0x0445: init_unk_next */ - 0x0120b601, - 0xb004e0b6, - 0x1bf40226, -/* 0x0451: init_unk_done */ - 0x070380e2, - 0xf1080480, - 0xf0010027, - 0x22cf0223, - 0x9534bd00, - 0x07f10825, - 0x03f0c000, - 0x0005d001, - 0x07f104bd, - 0x03f0c100, - 0x0005d001, - 0x0e9804bd, - 0x010f9800, - 0x015021f5, - 0xbb002fbb, - 0x0e98003f, - 0x020f9801, - 0x015021f5, - 0xfd050e98, - 0x2ebb00ef, - 0x003ebb00, - 0x98020e98, - 0x21f5030f, - 0x0e980150, - 0x00effd07, - 0xbb002ebb, - 0x35b6003e, - 0x0007f102, - 0x0103f0d3, - 0xbd0003d0, - 0x0825b604, - 0xb60635b6, - 0x30b60120, - 0x0824b601, - 0xb90834b6, - 0x21f5022f, - 0x2fbb02d3, - 0x003fbb00, - 0x010007f1, - 0xd00203f0, + 0xe5f050e3, + 0xbd24bd01, +/* 0x0433: init_unk_loop */ + 0xf444bd34, + 0xf6b06821, + 0x0f0bf400, + 0xbb01f7f0, + 0x4ffd04f2, + 0x0130b605, +/* 0x0448: init_unk_next */ + 0xb60120b6, + 0x26b004e0, + 0xe21bf402, +/* 0x0454: init_unk_done */ + 0x80070380, + 0x27f10804, + 0x23f00100, + 0x0022cf02, + 0x259534bd, + 0x0007f108, + 0x0103f0c0, + 0xbd0005d0, + 0x0007f104, + 0x0103f0c1, + 0xbd0005d0, + 0x000e9804, + 0xf5010f98, + 0xbb015021, + 0x3fbb002f, + 0x010e9800, + 0xf5020f98, + 0x98015021, + 0xeffd050e, + 0x002ebb00, + 0x98003ebb, + 0x0f98020e, + 0x5021f503, + 0x070e9801, + 0xbb00effd, + 0x3ebb002e, + 0x0235b600, + 0xd30007f1, + 0xd00103f0, 0x04bd0003, - 0x29f024bd, - 0x0007f11f, - 0x0203f030, - 0xbd0002d0, -/* 0x0505: main */ - 0x0031f404, - 0xf00028f4, - 0x21f424d7, - 0xf401f439, - 0xf404e4b0, - 0x81fe1e18, - 0x0627f001, - 0x12fd20bd, - 0x01e4b604, - 0xfe051efd, - 0x21f50018, - 0x0ef405fa, -/* 0x0535: main_not_ctx_xfer */ - 0x10ef94d3, - 0xf501f5f0, - 0xf4037e21, -/* 0x0542: ih */ - 0x80f9c60e, - 0xf90188fe, - 0xf990f980, - 0xf9b0f9a0, - 0xf9e0f9d0, - 0xf104bdf0, - 0xf00200a7, - 0xaacf00a3, - 0x04abc400, - 0xf02c0bf4, - 0xe7f124d7, - 0xe3f01a00, - 0x00eecf00, - 0x1900f7f1, - 0xcf00f3f0, - 0x21f400ff, - 0x01e7f004, - 0x1d0007f1, - 0xd00003f0, - 0x04bd000e, -/* 0x0590: ih_no_fifo */ - 0x010007f1, - 0xd00003f0, - 0x04bd000a, - 0xe0fcf0fc, - 0xb0fcd0fc, - 0x90fca0fc, - 0x88fe80fc, - 0xf480fc00, - 0x01f80032, -/* 0x05b4: hub_barrier_done */ - 0x9801f7f0, - 0xfebb040e, - 0x02ffb904, - 0x9418e7f1, - 0xf440e3f0, - 0x00f89d21, -/* 0x05cc: ctx_redswitch */ - 0xf120f7f0, + 0xb60825b6, + 0x20b60635, + 0x0130b601, + 0xb60824b6, + 0x2fb90834, + 0xd321f502, + 0x002fbb02, + 0xf1003fbb, + 0xf0010007, + 0x03d00203, + 0xbd04bd00, + 0x1f29f024, + 0x300007f1, + 0xd00203f0, + 0x04bd0002, +/* 0x0508: main */ + 0xf40031f4, + 0xd7f00028, + 0x3921f424, + 0xb0f401f4, + 0x18f404e4, + 0x0181fe1e, + 0xbd0627f0, + 0x0412fd20, + 0xfd01e4b6, + 0x18fe051e, + 0xfd21f500, + 0xd30ef405, +/* 0x0538: main_not_ctx_xfer */ + 0xf010ef94, + 0x21f501f5, + 0x0ef4037e, +/* 0x0545: ih */ + 0xfe80f9c6, + 0x80f90188, + 0xa0f990f9, + 0xd0f9b0f9, + 0xf0f9e0f9, + 0xa7f104bd, + 0xa3f00200, + 0x00aacf00, + 0xf404abc4, + 0xd7f02c0b, + 0x00e7f124, + 0x00e3f01a, + 0xf100eecf, + 0xf01900f7, + 0xffcf00f3, + 0x0421f400, + 0xf101e7f0, + 0xf01d0007, + 0x0ed00003, +/* 0x0593: ih_no_fifo */ + 0xf104bd00, + 0xf0010007, + 0x0ad00003, + 0xfc04bd00, + 0xfce0fcf0, + 0xfcb0fcd0, + 0xfc90fca0, + 0x0088fe80, + 0x32f480fc, +/* 0x05b7: hub_barrier_done */ + 0xf001f800, + 0x0e9801f7, + 0x04febb04, + 0xf102ffb9, + 0xf09418e7, + 0x21f440e3, +/* 0x05cf: ctx_redswitch */ + 0xf000f89d, + 0x07f120f7, + 0x03f08500, + 0x000fd001, + 0xe7f004bd, +/* 0x05e1: ctx_redswitch_delay */ + 0x01e2b608, + 0xf1fd1bf4, + 0xf10800f5, + 0xf10200f5, 0xf0850007, 0x0fd00103, - 0xf004bd00, -/* 0x05de: ctx_redswitch_delay */ - 
0xe2b608e7, - 0xfd1bf401, - 0x0800f5f1, - 0x0200f5f1, - 0x850007f1, - 0xd00103f0, - 0x04bd000f, -/* 0x05fa: ctx_xfer */ - 0x07f100f8, - 0x03f08100, - 0x000fd002, - 0x11f404bd, - 0xcc21f507, -/* 0x060d: ctx_xfer_not_load */ - 0x6a21f505, - 0xf124bd02, - 0xf047fc07, - 0x02d00203, - 0xf004bd00, - 0x20b6012c, - 0xfc07f103, - 0x0203f04a, - 0xbd0002d0, - 0x01acf004, - 0xf102a5f0, - 0xf00000b7, - 0x0c9850b3, - 0x0fc4b604, - 0x9800bcbb, - 0x0d98000c, - 0x00e7f001, - 0x016f21f5, - 0xf101acf0, - 0xf04000b7, - 0x0c9850b3, - 0x0fc4b604, - 0x9800bcbb, - 0x0d98010c, - 0x060f9802, - 0x0800e7f1, - 0x016f21f5, + 0xf804bd00, +/* 0x05fd: ctx_xfer */ + 0x0007f100, + 0x0203f081, + 0xbd000fd0, + 0x0711f404, + 0x05cf21f5, +/* 0x0610: ctx_xfer_not_load */ + 0x026a21f5, + 0x07f124bd, + 0x03f047fc, + 0x0002d002, + 0x2cf004bd, + 0x0320b601, + 0x4afc07f1, + 0xd00203f0, + 0x04bd0002, 0xf001acf0, - 0xb7f104a5, - 0xb3f03000, + 0xb7f102a5, + 0xb3f00000, 0x040c9850, 0xbb0fc4b6, 0x0c9800bc, - 0x030d9802, - 0xf1080f98, - 0xf50200e7, - 0xf5016f21, - 0xf4025e21, - 0x12f40601, -/* 0x06a9: ctx_xfer_post */ - 0x7f21f507, -/* 0x06ad: ctx_xfer_done */ - 0xb421f502, - 0x0000f805, - 0x00000000, + 0x010d9800, + 0xf500e7f0, + 0xf0016f21, + 0xb7f101ac, + 0xb3f04000, + 0x040c9850, + 0xbb0fc4b6, + 0x0c9800bc, + 0x020d9801, + 0xf1060f98, + 0xf50800e7, + 0xf0016f21, + 0xa5f001ac, + 0x00b7f104, + 0x50b3f030, + 0xb6040c98, + 0xbcbb0fc4, + 0x020c9800, + 0x98030d98, + 0xe7f1080f, + 0x21f50200, + 0x21f5016f, + 0x01f4025e, + 0x0712f406, +/* 0x06ac: ctx_xfer_post */ + 0x027f21f5, +/* 0x06b0: ctx_xfer_done */ + 0x05b721f5, + 0x000000f8, 0x00000000, 0x00000000, 0x00000000, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h index fbcc342f896f..84af7ec6a78e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h @@ -276,7 +276,7 @@ uint32_t gk208_grgpc_code[] = { 0x02020014, 0xf6120040, 0x04bd0002, - 0xfe048141, + 0xfe048441, 0x00400010, 0x0000f607, 0x040204bd, @@ -295,165 +295,165 @@ uint32_t gk208_grgpc_code[] = { 0x01c90080, 0xbd0002f6, 0x0c308e04, - 0xbd24bd50, -/* 0x0383: init_unk_loop */ - 0x7e44bd34, - 0xb0000065, - 0x0bf400f6, - 0xbb010f0e, - 0x4ffd04f2, - 0x0130b605, -/* 0x0398: init_unk_next */ - 0xb60120b6, - 0x26b004e0, - 0xe21bf401, -/* 0x03a4: init_unk_done */ - 0xb50703b5, - 0x00820804, - 0x22cf0201, - 0x9534bd00, - 0x00800825, - 0x05f601c0, - 0x8004bd00, - 0xf601c100, - 0x04bd0005, - 0x98000e98, - 0x207e010f, - 0x2fbb0001, - 0x003fbb00, - 0x98010e98, - 0x207e020f, - 0x0e980001, - 0x00effd05, - 0xbb002ebb, - 0x0e98003e, - 0x030f9802, - 0x0001207e, - 0xfd070e98, - 0x2ebb00ef, - 0x003ebb00, - 0x800235b6, - 0xf601d300, - 0x04bd0003, - 0xb60825b6, - 0x20b60635, - 0x0130b601, - 0xb60824b6, - 0x2fb20834, - 0x0002687e, - 0xbb002fbb, - 0x0080003f, - 0x03f60201, - 0xbd04bd00, - 0x1f29f024, - 0x02300080, - 0xbd0002f6, -/* 0x0445: main */ - 0x0031f404, - 0x0d0028f4, - 0x00377e24, - 0xf401f400, - 0xf404e4b0, - 0x81fe1d18, - 0xbd060201, - 0x0412fd20, - 0xfd01e4b6, - 0x18fe051e, - 0x05187e00, - 0xd40ef400, -/* 0x0474: main_not_ctx_xfer */ - 0xf010ef94, - 0xf87e01f5, - 0x0ef40002, -/* 0x0481: ih */ - 0xfe80f9c7, - 0x80f90188, - 0xa0f990f9, - 0xd0f9b0f9, - 0xf0f9e0f9, - 0x004a04bd, - 0x00aacf02, - 0xf404abc4, - 0x240d1f0b, - 0xcf1a004e, - 0x004f00ee, - 0x00ffcf19, - 0x0000047e, - 0x0040010e, - 0x000ef61d, -/* 0x04be: ih_no_fifo */ - 0x004004bd, - 0x000af601, - 0xf0fc04bd, - 0xd0fce0fc, - 
0xa0fcb0fc, - 0x80fc90fc, - 0xfc0088fe, - 0x0032f480, -/* 0x04de: hub_barrier_done */ - 0x010f01f8, - 0xbb040e98, - 0xffb204fe, - 0x4094188e, - 0x00008f7e, -/* 0x04f2: ctx_redswitch */ - 0x200f00f8, + 0x01e5f050, + 0x34bd24bd, +/* 0x0386: init_unk_loop */ + 0x657e44bd, + 0xf6b00000, + 0x0e0bf400, + 0xf2bb010f, + 0x054ffd04, +/* 0x039b: init_unk_next */ + 0xb60130b6, + 0xe0b60120, + 0x0126b004, +/* 0x03a7: init_unk_done */ + 0xb5e21bf4, + 0x04b50703, + 0x01008208, + 0x0022cf02, + 0x259534bd, + 0xc0008008, + 0x0005f601, + 0x008004bd, + 0x05f601c1, + 0x9804bd00, + 0x0f98000e, + 0x01207e01, + 0x002fbb00, + 0x98003fbb, + 0x0f98010e, + 0x01207e02, + 0x050e9800, + 0xbb00effd, + 0x3ebb002e, + 0x020e9800, + 0x7e030f98, + 0x98000120, + 0xeffd070e, + 0x002ebb00, + 0xb6003ebb, + 0x00800235, + 0x03f601d3, + 0xb604bd00, + 0x35b60825, + 0x0120b606, + 0xb60130b6, + 0x34b60824, + 0x7e2fb208, + 0xbb000268, + 0x3fbb002f, + 0x01008000, + 0x0003f602, + 0x24bd04bd, + 0x801f29f0, + 0xf6023000, + 0x04bd0002, +/* 0x0448: main */ + 0xf40031f4, + 0x240d0028, + 0x0000377e, + 0xb0f401f4, + 0x18f404e4, + 0x0181fe1d, + 0x20bd0602, + 0xb60412fd, + 0x1efd01e4, + 0x0018fe05, + 0x00051b7e, +/* 0x0477: main_not_ctx_xfer */ + 0x94d40ef4, + 0xf5f010ef, + 0x02f87e01, + 0xc70ef400, +/* 0x0484: ih */ + 0x88fe80f9, + 0xf980f901, + 0xf9a0f990, + 0xf9d0f9b0, + 0xbdf0f9e0, + 0x02004a04, + 0xc400aacf, + 0x0bf404ab, + 0x4e240d1f, + 0xeecf1a00, + 0x19004f00, + 0x7e00ffcf, + 0x0e000004, + 0x1d004001, + 0xbd000ef6, +/* 0x04c1: ih_no_fifo */ + 0x01004004, + 0xbd000af6, + 0xfcf0fc04, + 0xfcd0fce0, + 0xfca0fcb0, + 0xfe80fc90, + 0x80fc0088, + 0xf80032f4, +/* 0x04e1: hub_barrier_done */ + 0x98010f01, + 0xfebb040e, + 0x8effb204, + 0x7e409418, + 0xf800008f, +/* 0x04f5: ctx_redswitch */ + 0x80200f00, + 0xf6018500, + 0x04bd000f, +/* 0x0502: ctx_redswitch_delay */ + 0xe2b6080e, + 0xfd1bf401, + 0x0800f5f1, + 0x0200f5f1, 0x01850080, 0xbd000ff6, -/* 0x04ff: ctx_redswitch_delay */ - 0xb6080e04, - 0x1bf401e2, - 0x00f5f1fd, - 0x00f5f108, - 0x85008002, - 0x000ff601, - 0x00f804bd, -/* 0x0518: ctx_xfer */ - 0x02810080, - 0xbd000ff6, - 0x0711f404, - 0x0004f27e, -/* 0x0528: ctx_xfer_not_load */ - 0x0002167e, - 0xfc8024bd, - 0x02f60247, - 0xf004bd00, - 0x20b6012c, - 0x4afc8003, +/* 0x051b: ctx_xfer */ + 0x8000f804, + 0xf6028100, + 0x04bd000f, + 0x7e0711f4, +/* 0x052b: ctx_xfer_not_load */ + 0x7e0004f5, + 0xbd000216, + 0x47fc8024, 0x0002f602, - 0xacf004bd, - 0x02a5f001, - 0x5000008b, - 0xb6040c98, - 0xbcbb0fc4, - 0x000c9800, - 0x0e010d98, - 0x013d7e00, - 0x01acf000, - 0x5040008b, - 0xb6040c98, - 0xbcbb0fc4, - 0x010c9800, - 0x98020d98, - 0x004e060f, - 0x013d7e08, - 0x01acf000, - 0x8b04a5f0, - 0x98503000, + 0x2cf004bd, + 0x0320b601, + 0x024afc80, + 0xbd0002f6, + 0x01acf004, + 0x8b02a5f0, + 0x98500000, 0xc4b6040c, 0x00bcbb0f, - 0x98020c98, - 0x0f98030d, - 0x02004e08, + 0x98000c98, + 0x000e010d, 0x00013d7e, - 0x00020a7e, - 0xf40601f4, -/* 0x05b2: ctx_xfer_post */ - 0x277e0712, -/* 0x05b6: ctx_xfer_done */ - 0xde7e0002, - 0x00f80004, - 0x00000000, + 0x8b01acf0, + 0x98504000, + 0xc4b6040c, + 0x00bcbb0f, + 0x98010c98, + 0x0f98020d, + 0x08004e06, + 0x00013d7e, + 0xf001acf0, + 0x008b04a5, + 0x0c985030, + 0x0fc4b604, + 0x9800bcbb, + 0x0d98020c, + 0x080f9803, + 0x7e02004e, + 0x7e00013d, + 0xf400020a, + 0x12f40601, +/* 0x05b5: ctx_xfer_post */ + 0x02277e07, +/* 0x05b9: ctx_xfer_done */ + 0x04e17e00, + 0x0000f800, 0x00000000, 0x00000000, 0x00000000, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h 
b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h index 51f5c3c6e966..11bf363a6ae9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h @@ -289,7 +289,7 @@ uint32_t gm107_grgpc_code[] = { 0x020014fe, 0x12004002, 0xbd0002f6, - 0x05b04104, + 0x05b34104, 0x400010fe, 0x00f60700, 0x0204bd00, @@ -308,259 +308,259 @@ uint32_t gm107_grgpc_code[] = { 0xc900800f, 0x0002f601, 0x308e04bd, - 0x24bd500c, - 0x44bd34bd, -/* 0x03b0: init_unk_loop */ - 0x0000657e, - 0xf400f6b0, - 0x010f0e0b, - 0xfd04f2bb, - 0x30b6054f, -/* 0x03c5: init_unk_next */ - 0x0120b601, - 0xb004e0b6, - 0x1bf40226, -/* 0x03d1: init_unk_done */ - 0x0703b5e2, - 0x820804b5, - 0xcf020100, - 0x34bd0022, - 0x80082595, - 0xf601c000, + 0xe5f0500c, + 0xbd24bd01, +/* 0x03b3: init_unk_loop */ + 0x7e44bd34, + 0xb0000065, + 0x0bf400f6, + 0xbb010f0e, + 0x4ffd04f2, + 0x0130b605, +/* 0x03c8: init_unk_next */ + 0xb60120b6, + 0x26b004e0, + 0xe21bf402, +/* 0x03d4: init_unk_done */ + 0xb50703b5, + 0x00820804, + 0x22cf0201, + 0x9534bd00, + 0x00800825, + 0x05f601c0, + 0x8004bd00, + 0xf601c100, 0x04bd0005, - 0x01c10080, - 0xbd0005f6, - 0x000e9804, - 0x7e010f98, - 0xbb000120, - 0x3fbb002f, - 0x010e9800, - 0x7e020f98, - 0x98000120, - 0xeffd050e, - 0x002ebb00, - 0x98003ebb, - 0x0f98020e, - 0x01207e03, - 0x070e9800, - 0xbb00effd, - 0x3ebb002e, - 0x0235b600, - 0x01d30080, - 0xbd0003f6, - 0x0825b604, - 0xb60635b6, - 0x30b60120, - 0x0824b601, - 0xb20834b6, - 0x02687e2f, - 0x002fbb00, - 0x0f003fbb, - 0x8effb23f, - 0xf0501d60, - 0x8f7e01e5, - 0x0c0f0000, - 0xa88effb2, - 0xe5f0501d, - 0x008f7e01, - 0x03147e00, - 0xb23f0f00, - 0x1d608eff, - 0x01e5f050, - 0x00008f7e, - 0xffb2000f, - 0x501d9c8e, - 0x7e01e5f0, - 0x0f00008f, - 0x03147e01, - 0x8effb200, + 0x98000e98, + 0x207e010f, + 0x2fbb0001, + 0x003fbb00, + 0x98010e98, + 0x207e020f, + 0x0e980001, + 0x00effd05, + 0xbb002ebb, + 0x0e98003e, + 0x030f9802, + 0x0001207e, + 0xfd070e98, + 0x2ebb00ef, + 0x003ebb00, + 0x800235b6, + 0xf601d300, + 0x04bd0003, + 0xb60825b6, + 0x20b60635, + 0x0130b601, + 0xb60824b6, + 0x2fb20834, + 0x0002687e, + 0xbb002fbb, + 0x3f0f003f, + 0x501d608e, + 0xb201e5f0, + 0x008f7eff, + 0x8e0c0f00, 0xf0501da8, - 0x8f7e01e5, - 0xff0f0000, - 0x988effb2, + 0xffb201e5, + 0x00008f7e, + 0x0003147e, + 0x608e3f0f, 0xe5f0501d, - 0x008f7e01, - 0xb2020f00, - 0x1da88eff, + 0x7effb201, + 0x0f00008f, + 0x1d9c8e00, 0x01e5f050, - 0x00008f7e, + 0x8f7effb2, + 0x010f0000, 0x0003147e, - 0x85050498, - 0x98504000, - 0x64b60406, - 0x0056bb0f, -/* 0x04e0: tpc_strand_init_tpc_loop */ - 0x05705eb8, - 0x00657e00, - 0xbdf6b200, -/* 0x04ed: tpc_strand_init_idx_loop */ - 0x605eb874, - 0x7fb20005, - 0x00008f7e, - 0x05885eb8, - 0x082f9500, - 0x00008f7e, - 0x058c5eb8, - 0x082f9500, + 0x501da88e, + 0xb201e5f0, + 0x008f7eff, + 0x8eff0f00, + 0xf0501d98, + 0xffb201e5, 0x00008f7e, - 0x05905eb8, - 0x00657e00, - 0x06f5b600, - 0xb601f0b6, - 0x2fbb08f4, - 0x003fbb00, - 0xb60170b6, - 0x1bf40162, - 0x0050b7bf, - 0x0142b608, - 0x0fa81bf4, - 0x8effb23f, - 0xf0501d60, - 0x8f7e01e5, - 0x0d0f0000, - 0xa88effb2, + 0xa88e020f, 0xe5f0501d, - 0x008f7e01, - 0x03147e00, - 0x01008000, - 0x0003f602, - 0x24bd04bd, - 0x801f29f0, - 0xf6023000, - 0x04bd0002, -/* 0x0574: main */ - 0xf40031f4, - 0x240d0028, - 0x0000377e, - 0xb0f401f4, - 0x18f404e4, - 0x0181fe1d, - 0x20bd0602, - 0xb60412fd, - 0x1efd01e4, - 0x0018fe05, - 0x0006477e, -/* 0x05a3: main_not_ctx_xfer */ - 0x94d40ef4, - 0xf5f010ef, - 0x02f87e01, - 0xc70ef400, -/* 0x05b0: ih */ - 0x88fe80f9, - 0xf980f901, - 
0xf9a0f990, - 0xf9d0f9b0, - 0xbdf0f9e0, - 0x02004a04, - 0xc400aacf, - 0x0bf404ab, - 0x4e240d1f, - 0xeecf1a00, - 0x19004f00, - 0x7e00ffcf, - 0x0e000004, - 0x1d004001, - 0xbd000ef6, -/* 0x05ed: ih_no_fifo */ - 0x01004004, - 0xbd000af6, - 0xfcf0fc04, - 0xfcd0fce0, - 0xfca0fcb0, - 0xfe80fc90, - 0x80fc0088, - 0xf80032f4, -/* 0x060d: hub_barrier_done */ - 0x98010f01, - 0xfebb040e, - 0x8effb204, - 0x7e409418, - 0xf800008f, -/* 0x0621: ctx_redswitch */ - 0x80200f00, + 0x7effb201, + 0x7e00008f, + 0x98000314, + 0x00850504, + 0x06985040, + 0x0f64b604, +/* 0x04e3: tpc_strand_init_tpc_loop */ + 0xb80056bb, + 0x0005705e, + 0x0000657e, + 0x74bdf6b2, +/* 0x04f0: tpc_strand_init_idx_loop */ + 0x05605eb8, + 0x7e7fb200, + 0xb800008f, + 0x0005885e, + 0x7e082f95, + 0xb800008f, + 0x00058c5e, + 0x7e082f95, + 0xb800008f, + 0x0005905e, + 0x0000657e, + 0xb606f5b6, + 0xf4b601f0, + 0x002fbb08, + 0xb6003fbb, + 0x62b60170, + 0xbf1bf401, + 0x080050b7, + 0xf40142b6, + 0x3f0fa81b, + 0x501d608e, + 0xb201e5f0, + 0x008f7eff, + 0x8e0d0f00, + 0xf0501da8, + 0xffb201e5, + 0x00008f7e, + 0x0003147e, + 0x02010080, + 0xbd0003f6, + 0xf024bd04, + 0x00801f29, + 0x02f60230, +/* 0x0577: main */ + 0xf404bd00, + 0x28f40031, + 0x7e240d00, + 0xf4000037, + 0xe4b0f401, + 0x1d18f404, + 0x020181fe, + 0xfd20bd06, + 0xe4b60412, + 0x051efd01, + 0x7e0018fe, + 0xf400064a, +/* 0x05a6: main_not_ctx_xfer */ + 0xef94d40e, + 0x01f5f010, + 0x0002f87e, +/* 0x05b3: ih */ + 0xf9c70ef4, + 0x0188fe80, + 0x90f980f9, + 0xb0f9a0f9, + 0xe0f9d0f9, + 0x04bdf0f9, + 0xcf02004a, + 0xabc400aa, + 0x1f0bf404, + 0x004e240d, + 0x00eecf1a, + 0xcf19004f, + 0x047e00ff, + 0x010e0000, + 0xf61d0040, + 0x04bd000e, +/* 0x05f0: ih_no_fifo */ + 0xf6010040, + 0x04bd000a, + 0xe0fcf0fc, + 0xb0fcd0fc, + 0x90fca0fc, + 0x88fe80fc, + 0xf480fc00, + 0x01f80032, +/* 0x0610: hub_barrier_done */ + 0x0e98010f, + 0x04febb04, + 0x188effb2, + 0x8f7e4094, + 0x00f80000, +/* 0x0624: ctx_redswitch */ + 0x0080200f, + 0x0ff60185, + 0x0e04bd00, +/* 0x0631: ctx_redswitch_delay */ + 0x01e2b608, + 0xf1fd1bf4, + 0xf10800f5, + 0x800200f5, 0xf6018500, 0x04bd000f, -/* 0x062e: ctx_redswitch_delay */ - 0xe2b6080e, - 0xfd1bf401, - 0x0800f5f1, - 0x0200f5f1, - 0x01850080, - 0xbd000ff6, -/* 0x0647: ctx_xfer */ - 0x8000f804, - 0xf6028100, - 0x04bd000f, - 0xc48effb2, - 0xe5f0501d, - 0x008f7e01, - 0x0711f400, - 0x0006217e, -/* 0x0664: ctx_xfer_not_load */ - 0x0002167e, - 0xfc8024bd, - 0x02f60247, - 0xf004bd00, - 0x20b6012c, - 0x4afc8003, +/* 0x064a: ctx_xfer */ + 0x008000f8, + 0x0ff60281, + 0x8e04bd00, + 0xf0501dc4, + 0xffb201e5, + 0x00008f7e, + 0x7e0711f4, +/* 0x0667: ctx_xfer_not_load */ + 0x7e000624, + 0xbd000216, + 0x47fc8024, 0x0002f602, - 0x0c0f04bd, - 0xa88effb2, - 0xe5f0501d, - 0x008f7e01, - 0x03147e00, - 0xb23f0f00, - 0x1d608eff, - 0x01e5f050, + 0x2cf004bd, + 0x0320b601, + 0x024afc80, + 0xbd0002f6, + 0x8e0c0f04, + 0xf0501da8, + 0xffb201e5, 0x00008f7e, - 0xffb2000f, - 0x501d9c8e, - 0x7e01e5f0, + 0x0003147e, + 0x608e3f0f, + 0xe5f0501d, + 0x7effb201, 0x0f00008f, - 0x03147e01, - 0x01fcf000, - 0xb203f0b6, - 0x1da88eff, + 0x1d9c8e00, 0x01e5f050, - 0x00008f7e, - 0xf001acf0, - 0x008b02a5, - 0x0c985000, - 0x0fc4b604, - 0x9800bcbb, - 0x0d98000c, - 0x7e000e01, - 0xf000013d, - 0x008b01ac, - 0x0c985040, - 0x0fc4b604, - 0x9800bcbb, - 0x0d98010c, - 0x060f9802, - 0x7e08004e, - 0xf000013d, + 0x8f7effb2, + 0x010f0000, + 0x0003147e, + 0xb601fcf0, + 0xa88e03f0, + 0xe5f0501d, + 0x7effb201, + 0xf000008f, 0xa5f001ac, - 0x30008b04, + 0x00008b02, 0x040c9850, 0xbb0fc4b6, 0x0c9800bc, - 0x030d9802, - 0x4e080f98, - 0x3d7e0200, - 0x0a7e0001, - 
0x147e0002, - 0x01f40003, - 0x1a12f406, -/* 0x073c: ctx_xfer_post */ - 0x0002277e, - 0xffb20d0f, - 0x501da88e, - 0x7e01e5f0, - 0x7e00008f, -/* 0x0753: ctx_xfer_done */ - 0x7e000314, - 0xf800060d, - 0x00000000, + 0x010d9800, + 0x3d7e000e, + 0xacf00001, + 0x40008b01, + 0x040c9850, + 0xbb0fc4b6, + 0x0c9800bc, + 0x020d9801, + 0x4e060f98, + 0x3d7e0800, + 0xacf00001, + 0x04a5f001, + 0x5030008b, + 0xb6040c98, + 0xbcbb0fc4, + 0x020c9800, + 0x98030d98, + 0x004e080f, + 0x013d7e02, + 0x020a7e00, + 0x03147e00, + 0x0601f400, +/* 0x073f: ctx_xfer_post */ + 0x7e1a12f4, + 0x0f000227, + 0x1da88e0d, + 0x01e5f050, + 0x8f7effb2, + 0x147e0000, +/* 0x0756: ctx_xfer_done */ + 0x107e0003, + 0x00f80006, 0x00000000, 0x00000000, 0x00000000, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c index dda7a7d224c9..9f5dfc85147a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c @@ -143,7 +143,7 @@ gf100_gr_zbc_depth_get(struct gf100_gr *gr, int format, static int gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size) { - struct gf100_gr *gr = (void *)object->engine; + struct gf100_gr *gr = gf100_gr(nvkm_gr(object->engine)); union { struct fermi_a_zbc_color_v0 v0; } *args = data; @@ -189,7 +189,7 @@ gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size) static int gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size) { - struct gf100_gr *gr = (void *)object->engine; + struct gf100_gr *gr = gf100_gr(nvkm_gr(object->engine)); union { struct fermi_a_zbc_depth_v0 v0; } *args = data; @@ -1530,6 +1530,8 @@ gf100_gr_oneinit(struct nvkm_gr *base) gr->ppc_nr[i] = gr->func->ppc_nr; for (j = 0; j < gr->ppc_nr[i]; j++) { u8 mask = nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4))); + if (mask) + gr->ppc_mask[i] |= (1 << j); gr->ppc_tpc_nr[i][j] = hweight8(mask); } } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h index 4611961b1187..02e78b8d93f6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h @@ -97,6 +97,7 @@ struct gf100_gr { u8 tpc_nr[GPC_MAX]; u8 tpc_total; u8 ppc_nr[GPC_MAX]; + u8 ppc_mask[GPC_MAX]; u8 ppc_tpc_nr[GPC_MAX][4]; struct nvkm_memory *unk4188b4; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c index 895ba74057d4..1d7dd38292b3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c @@ -97,7 +97,9 @@ static void * nvkm_instobj_dtor(struct nvkm_memory *memory) { struct nvkm_instobj *iobj = nvkm_instobj(memory); + spin_lock(&iobj->imem->lock); list_del(&iobj->head); + spin_unlock(&iobj->imem->lock); nvkm_memory_del(&iobj->parent); return iobj; } @@ -190,7 +192,9 @@ nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero, nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory); iobj->parent = memory; iobj->imem = imem; + spin_lock(&iobj->imem->lock); list_add_tail(&iobj->head, &imem->list); + spin_unlock(&iobj->imem->lock); memory = &iobj->memory; } @@ -309,5 +313,6 @@ nvkm_instmem_ctor(const struct nvkm_instmem_func *func, { nvkm_subdev_ctor(&nvkm_instmem, device, index, 0, &imem->subdev); imem->func = func; + spin_lock_init(&imem->lock); INIT_LIST_HEAD(&imem->list); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c 
b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c index b61509e26ec9..b735173a18ff 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c @@ -59,7 +59,7 @@ gk104_volt_set(struct nvkm_volt *base, u32 uv) duty = (uv - bios->base) * div / bios->pwm_range; nvkm_wr32(device, 0x20340, div); - nvkm_wr32(device, 0x20344, 0x8000000 | duty); + nvkm_wr32(device, 0x20344, 0x80000000 | duty); return 0; } diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index 5c367aad8a6e..130fca70bfd7 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -172,9 +172,9 @@ void copy_timings_drm_to_omap(struct omap_video_timings *timings, uint32_t omap_framebuffer_get_formats(uint32_t *pixel_formats, uint32_t max_formats, enum omap_color_mode supported_modes); struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev, - struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd); + struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd); struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, - struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); + const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p); int omap_framebuffer_pin(struct drm_framebuffer *fb); void omap_framebuffer_unpin(struct drm_framebuffer *fb); @@ -248,7 +248,7 @@ struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder); static inline int objects_lookup(struct drm_device *dev, struct drm_file *filp, uint32_t pixel_format, - struct drm_gem_object **bos, uint32_t *handles) + struct drm_gem_object **bos, const uint32_t *handles) { int i, n = drm_format_num_planes(pixel_format); diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c index 636a1f921569..ad202dfc1a49 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.c +++ b/drivers/gpu/drm/omapdrm/omap_fb.c @@ -364,7 +364,7 @@ void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m) #endif struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev, - struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd) + struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd) { struct drm_gem_object *bos[4]; struct drm_framebuffer *fb; @@ -386,7 +386,7 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev, } struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, - struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos) + const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos) { struct omap_framebuffer *omap_fb = NULL; struct drm_framebuffer *fb = NULL; diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 183aea1abebc..cddba079197f 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -521,7 +521,7 @@ static const struct drm_framebuffer_funcs qxl_fb_funcs = { int qxl_framebuffer_init(struct drm_device *dev, struct qxl_framebuffer *qfb, - struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj) { int ret; @@ -1003,7 +1003,7 @@ static int qdev_output_init(struct drm_device *dev, int num_output) static struct drm_framebuffer * qxl_user_framebuffer_create(struct drm_device *dev, struct drm_file *file_priv, - struct drm_mode_fb_cmd2 *mode_cmd) + const struct drm_mode_fb_cmd2 *mode_cmd) { struct 
drm_gem_object *obj; struct qxl_framebuffer *qxl_fb; diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 01a86948eb8c..6e6b9b1519b8 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -390,7 +390,7 @@ void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state); int qxl_framebuffer_init(struct drm_device *dev, struct qxl_framebuffer *rfb, - struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj); void qxl_display_read_client_monitors_config(struct qxl_device *qdev); void qxl_send_monitors_config(struct qxl_device *qdev); diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c index c4a552637c93..7136e521e6db 100644 --- a/drivers/gpu/drm/qxl/qxl_fb.c +++ b/drivers/gpu/drm/qxl/qxl_fb.c @@ -40,7 +40,6 @@ struct qxl_fbdev { struct drm_fb_helper helper; struct qxl_framebuffer qfb; - struct list_head fbdev_list; struct qxl_device *qdev; spinlock_t delayed_ops_lock; @@ -283,7 +282,7 @@ int qxl_get_handle_for_primary_fb(struct qxl_device *qdev, } static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev, - struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **gobj_p) { struct qxl_device *qdev = qfbdev->qdev; diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 248953d2fdb7..0154db43860c 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -8472,7 +8472,7 @@ restart_ih: if (queue_dp) schedule_work(&rdev->dp_work); if (queue_hotplug) - schedule_work(&rdev->hotplug_work); + schedule_delayed_work(&rdev->hotplug_work, 0); if (queue_reset) { rdev->needs_reset = true; wake_up_all(&rdev->fence_queue); @@ -9630,6 +9630,9 @@ static void dce8_program_watermarks(struct radeon_device *rdev, (rdev->disp_priority == 2)) { DRM_DEBUG_KMS("force priority to high\n"); } + + /* Save number of lines the linebuffer leads before the scanout */ + radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); } /* select wm A */ diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 7f33767d7ed6..2ad462896896 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2372,6 +2372,9 @@ static void evergreen_program_watermarks(struct radeon_device *rdev, c.full = dfixed_div(c, a); priority_b_mark = dfixed_trunc(c); priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK; + + /* Save number of lines the linebuffer leads before the scanout */ + radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); } /* select wm A */ @@ -5344,7 +5347,7 @@ restart_ih: if (queue_dp) schedule_work(&rdev->dp_work); if (queue_hotplug) - schedule_work(&rdev->hotplug_work); + schedule_delayed_work(&rdev->hotplug_work, 0); if (queue_hdmi) schedule_work(&rdev->audio_work); if (queue_thermal && rdev->pm.dpm_enabled) diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 238b13f045c1..9e7e2bf03b81 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -806,7 +806,7 @@ int r100_irq_process(struct radeon_device *rdev) status = r100_irq_ack(rdev); } if (queue_hotplug) - schedule_work(&rdev->hotplug_work); + schedule_delayed_work(&rdev->hotplug_work, 0); if (rdev->msi_enabled) { switch (rdev->family) { case CHIP_RS400: @@ -3217,6 +3217,9 @@ void r100_bandwidth_update(struct radeon_device *rdev) uint32_t pixel_bytes1 = 0; uint32_t pixel_bytes2 = 0; + /* Guess line 
buffer size to be 8192 pixels */ + u32 lb_size = 8192; + if (!rdev->mode_info.mode_config_initialized) return; @@ -3631,6 +3634,13 @@ void r100_bandwidth_update(struct radeon_device *rdev) DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n", (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); } + + /* Save number of lines the linebuffer leads before the scanout */ + if (mode1) + rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay); + + if (mode2) + rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay); } int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 4ea5b10ff5f4..cc2fdf0be37a 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -4276,7 +4276,7 @@ restart_ih: WREG32(IH_RB_RPTR, rptr); } if (queue_hotplug) - schedule_work(&rdev->hotplug_work); + schedule_delayed_work(&rdev->hotplug_work, 0); if (queue_hdmi) schedule_work(&rdev->audio_work); if (queue_thermal && rdev->pm.dpm_enabled) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index b6cbd816537e..87db64983ea8 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -2414,7 +2414,7 @@ struct radeon_device { struct r600_ih ih; /* r6/700 interrupt ring */ struct radeon_rlc rlc; struct radeon_mec mec; - struct work_struct hotplug_work; + struct delayed_work hotplug_work; struct work_struct dp_work; struct work_struct audio_work; int num_crtc; /* number of crtcs */ diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c index fe994aac3b04..c77d349c561c 100644 --- a/drivers/gpu/drm/radeon/radeon_agp.c +++ b/drivers/gpu/drm/radeon/radeon_agp.c @@ -54,6 +54,9 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = { /* Intel 82855PM host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #195051) */ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e50, PCI_VENDOR_ID_IBM, 0x0550, 1}, + /* Intel 82855PM host bridge / RV250/M9 GL [Mobility FireGL 9000/Radeon 9000] needs AGPMode 1 (Thinkpad T40p) */ + { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66, + PCI_VENDOR_ID_IBM, 0x054d, 1}, /* Intel 82855PM host bridge / Mobility M7 needs AGPMode 1 */ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c57, PCI_VENDOR_ID_IBM, 0x0530, 1}, diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 5a2cafb4f1bc..340f3f549f29 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -1234,13 +1234,32 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) if (r < 0) return connector_status_disconnected; + if (radeon_connector->detected_hpd_without_ddc) { + force = true; + radeon_connector->detected_hpd_without_ddc = false; + } + if (!force && radeon_check_hpd_status_unchanged(connector)) { ret = connector->status; goto exit; } - if (radeon_connector->ddc_bus) + if (radeon_connector->ddc_bus) { dret = radeon_ddc_probe(radeon_connector, false); + + /* Sometimes the pins required for the DDC probe on DVI + * connectors don't make contact at the same time that the ones + * for HPD do. 
If the DDC probe fails even though we had an HPD + * signal, try again later */ + if (!dret && !force && + connector->status != connector_status_connected) { + DRM_DEBUG_KMS("hpd detected without ddc, retrying in 1 second\n"); + radeon_connector->detected_hpd_without_ddc = true; + schedule_delayed_work(&rdev->hotplug_work, + msecs_to_jiffies(1000)); + goto exit; + } + } if (dret) { radeon_connector->detected_by_load = false; radeon_connector_free_edid(connector); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index a8d9927ed9eb..b3bb92368ae0 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -322,7 +322,9 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id) * to complete in this vblank? */ if (update_pending && - (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, 0, + (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, + crtc_id, + USE_REAL_VBLANKSTART, &vpos, &hpos, NULL, NULL, &rdev->mode_info.crtcs[crtc_id]->base.hwmode)) && ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) || @@ -401,6 +403,8 @@ static void radeon_flip_work_func(struct work_struct *__work) struct drm_crtc *crtc = &radeon_crtc->base; unsigned long flags; int r; + int vpos, hpos, stat, min_udelay; + struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id]; down_read(&rdev->exclusive_lock); if (work->fence) { @@ -437,6 +441,41 @@ static void radeon_flip_work_func(struct work_struct *__work) /* set the proper interrupt */ radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id); + /* If this happens to execute within the "virtually extended" vblank + * interval before the start of the real vblank interval then it needs + * to delay programming the mmio flip until the real vblank is entered. + * This prevents completing a flip too early due to the way we fudge + * our vblank counter and vblank timestamps in order to work around the + * problem that the hw fires vblank interrupts before actual start of + * vblank (when line buffer refilling is done for a frame). It + * complements the fudging logic in radeon_get_crtc_scanoutpos() for + * timestamping and radeon_get_vblank_counter_kms() for vblank counts. + * + * In practice this won't execute very often unless on very fast + * machines because the time window for this to happen is very small. + */ + for (;;) { + /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank + * start in hpos, and to the "fudged earlier" vblank start in + * vpos. 
+ */ + stat = radeon_get_crtc_scanoutpos(rdev->ddev, work->crtc_id, + GET_DISTANCE_TO_VBLANKSTART, + &vpos, &hpos, NULL, NULL, + &crtc->hwmode); + + if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) != + (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) || + !(vpos >= 0 && hpos <= 0)) + break; + + /* Sleep at least until estimated real start of hw vblank */ + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5); + usleep_range(min_udelay, 2 * min_udelay); + spin_lock_irqsave(&crtc->dev->event_lock, flags); + }; + /* do the flip (mmio) */ radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base); @@ -1292,7 +1331,7 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = { int radeon_framebuffer_init(struct drm_device *dev, struct radeon_framebuffer *rfb, - struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj) { int ret; @@ -1309,7 +1348,7 @@ radeon_framebuffer_init(struct drm_device *dev, static struct drm_framebuffer * radeon_user_framebuffer_create(struct drm_device *dev, struct drm_file *file_priv, - struct drm_mode_fb_cmd2 *mode_cmd) + const struct drm_mode_fb_cmd2 *mode_cmd) { struct drm_gem_object *obj; struct radeon_framebuffer *radeon_fb; @@ -1768,6 +1807,15 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, * \param dev Device to query. * \param crtc Crtc to query. * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0). + * For driver internal use only also supports these flags: + * + * USE_REAL_VBLANKSTART to use the real start of vblank instead + * of a fudged earlier start of vblank. + * + * GET_DISTANCE_TO_VBLANKSTART to return distance to the + * fudged earlier start of vblank in *vpos and the distance + * to true start of vblank in *hpos. + * * \param *vpos Location where vertical scanout position should be stored. * \param *hpos Location where horizontal scanout position should go. * \param *stime Target location for timestamp taken immediately before @@ -1911,10 +1959,40 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, vbl_end = 0; } + /* Called from driver internal vblank counter query code? */ + if (flags & GET_DISTANCE_TO_VBLANKSTART) { + /* Caller wants distance from real vbl_start in *hpos */ + *hpos = *vpos - vbl_start; + } + + /* Fudge vblank to start a few scanlines earlier to handle the + * problem that vblank irqs fire a few scanlines before start + * of vblank. Some driver internal callers need the true vblank + * start to be used and signal this via the USE_REAL_VBLANKSTART flag. + * + * The cause of the "early" vblank irq is that the irq is triggered + * by the line buffer logic when the line buffer read position enters + * the vblank, whereas our crtc scanout position naturally lags the + * line buffer read position. + */ + if (!(flags & USE_REAL_VBLANKSTART)) + vbl_start -= rdev->mode_info.crtcs[pipe]->lb_vblank_lead_lines; + /* Test scanout position against vblank region. */ if ((*vpos < vbl_start) && (*vpos >= vbl_end)) in_vbl = false; + /* In vblank? */ + if (in_vbl) + ret |= DRM_SCANOUTPOS_IN_VBLANK; + + /* Called from driver internal vblank counter query code? 
*/ + if (flags & GET_DISTANCE_TO_VBLANKSTART) { + /* Caller wants distance from fudged earlier vbl_start */ + *vpos -= vbl_start; + return ret; + } + /* Check if inside vblank area and apply corrective offsets: * vpos will then be >=0 in video scanout area, but negative * within vblank area, counting down the number of lines until @@ -1930,31 +2008,5 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, /* Correct for shifted end of vbl at vbl_end. */ *vpos = *vpos - vbl_end; - /* In vblank? */ - if (in_vbl) - ret |= DRM_SCANOUTPOS_IN_VBLANK; - - /* Is vpos outside nominal vblank area, but less than - * 1/100 of a frame height away from start of vblank? - * If so, assume this isn't a massively delayed vblank - * interrupt, but a vblank interrupt that fired a few - * microseconds before true start of vblank. Compensate - * by adding a full frame duration to the final timestamp. - * Happens, e.g., on ATI R500, R600. - * - * We only do this if DRM_CALLED_FROM_VBLIRQ. - */ - if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) { - vbl_start = mode->crtc_vdisplay; - vtotal = mode->crtc_vtotal; - - if (vbl_start - *vpos < vtotal / 100) { - *vpos -= vtotal; - - /* Signal this correction as "applied". */ - ret |= 0x8; - } - } - return ret; } diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 26da2f4d7b4f..adc44bbc81a9 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -44,7 +44,6 @@ struct radeon_fbdev { struct drm_fb_helper helper; struct radeon_framebuffer rfb; - struct list_head fbdev_list; struct radeon_device *rdev; }; diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 171d3e43c30c..979f3bf65f2c 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -74,7 +74,7 @@ irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg) static void radeon_hotplug_work_func(struct work_struct *work) { struct radeon_device *rdev = container_of(work, struct radeon_device, - hotplug_work); + hotplug_work.work); struct drm_device *dev = rdev->ddev; struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *connector; @@ -302,7 +302,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev) } } - INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); + INIT_DELAYED_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); INIT_WORK(&rdev->dp_work, radeon_dp_work_func); INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi); @@ -310,7 +310,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev) r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq); if (r) { rdev->irq.installed = false; - flush_work(&rdev->hotplug_work); + flush_delayed_work(&rdev->hotplug_work); return r; } @@ -333,7 +333,7 @@ void radeon_irq_kms_fini(struct radeon_device *rdev) rdev->irq.installed = false; if (rdev->msi_enabled) pci_disable_msi(rdev->pdev); - flush_work(&rdev->hotplug_work); + flush_delayed_work(&rdev->hotplug_work); } } diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 0ec6fcca16d3..d290a8a09036 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -755,6 +755,8 @@ void radeon_driver_preclose_kms(struct drm_device *dev, */ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc) { + int vpos, hpos, stat; + u32 count; struct radeon_device *rdev = dev->dev_private; if (crtc < 0 || crtc >= rdev->num_crtc) { @@ -762,7 +764,53 @@ 
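The radeon hunks above convert the hotplug handler from a plain work_struct to a delayed_work, so the IRQ paths can still kick it immediately (delay 0) while radeon_dvi_detect() can re-poll a flaky DDC line one second after an HPD signal. A minimal, self-contained sketch of that delayed_work pattern — hypothetical demo_* names, not radeon code:

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_dev {
	struct delayed_work hotplug_work;	/* was: struct work_struct */
};

static struct demo_dev demo;

static void demo_hotplug_func(struct work_struct *work)
{
	/* delayed_work embeds a work_struct, hence the .work member */
	struct demo_dev *ddev = container_of(work, struct demo_dev,
					     hotplug_work.work);
	(void)ddev;	/* re-probe connectors here */
}

static int __init demo_init(void)
{
	INIT_DELAYED_WORK(&demo.hotplug_work, demo_hotplug_func);
	/* delay 0 runs as soon as possible, matching the IRQ paths above;
	 * the DDC retry path would pass msecs_to_jiffies(1000) instead */
	schedule_delayed_work(&demo.hotplug_work, 0);
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo.hotplug_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");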
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc) return -EINVAL; } - return radeon_get_vblank_counter(rdev, crtc); + /* The hw increments its frame counter at start of vsync, not at start + * of vblank, as is required by DRM core vblank counter handling. + * Cook the hw count here to make it appear to the caller as if it + * incremented at start of vblank. We measure distance to start of + * vblank in vpos. vpos therefore will be >= 0 between start of vblank + * and start of vsync, so vpos >= 0 means to bump the hw frame counter + * result by 1 to give the proper appearance to caller. + */ + if (rdev->mode_info.crtcs[crtc]) { + /* Repeat readout if needed to provide stable result if + * we cross start of vsync during the queries. + */ + do { + count = radeon_get_vblank_counter(rdev, crtc); + /* Ask radeon_get_crtc_scanoutpos to return vpos as + * distance to start of vblank, instead of regular + * vertical scanout pos. + */ + stat = radeon_get_crtc_scanoutpos( + dev, crtc, GET_DISTANCE_TO_VBLANKSTART, + &vpos, &hpos, NULL, NULL, + &rdev->mode_info.crtcs[crtc]->base.hwmode); + } while (count != radeon_get_vblank_counter(rdev, crtc)); + + if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) != + (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) { + DRM_DEBUG_VBL("Query failed! stat %d\n", stat); + } + else { + DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n", + crtc, vpos); + + /* Bump counter if we are at >= leading edge of vblank, + * but before vsync where vpos would turn negative and + * the hw counter really increments. + */ + if (vpos >= 0) + count++; + } + } + else { + /* Fallback to use value as is. */ + count = radeon_get_vblank_counter(rdev, crtc); + DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n"); + } + + return count; } /** diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 830e171c3a9e..cddd41b32eda 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -367,6 +367,7 @@ struct radeon_crtc { u32 line_time; u32 wm_low; u32 wm_high; + u32 lb_vblank_lead_lines; struct drm_display_mode hw_mode; enum radeon_output_csc output_csc; }; @@ -553,6 +554,7 @@ struct radeon_connector { void *con_priv; bool dac_load_detect; bool detected_by_load; /* if the connection status was determined by load */ + bool detected_hpd_without_ddc; /* if an HPD signal was detected on DVI, but ddc probing failed */ uint16_t connector_object_id; struct radeon_hpd hpd; struct radeon_router router; @@ -686,6 +688,9 @@ struct atom_voltage_table struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES]; }; +/* Driver internal use only flags of radeon_get_crtc_scanoutpos() */ +#define USE_REAL_VBLANKSTART (1 << 30) +#define GET_DISTANCE_TO_VBLANKSTART (1 << 31) extern void radeon_add_atom_connector(struct drm_device *dev, @@ -929,7 +934,7 @@ extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green u16 *blue, int regno); int radeon_framebuffer_init(struct drm_device *dev, struct radeon_framebuffer *rfb, - struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj); int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb); diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index f4f03dcc1530..59abebd6b5dc 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -1756,7 +1756,9 @@ static bool radeon_pm_in_vbl(struct radeon_device 
*rdev) */ for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { if (rdev->pm.active_crtcs & (1 << crtc)) { - vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, + vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, + crtc, + USE_REAL_VBLANKSTART, &vpos, &hpos, NULL, NULL, &rdev->mode_info.crtcs[crtc]->base.hwmode); if ((vbl_status & DRM_SCANOUTPOS_VALID) && diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 97a904835759..6244f4e44e9a 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -813,7 +813,7 @@ int rs600_irq_process(struct radeon_device *rdev) status = rs600_irq_ack(rdev); } if (queue_hotplug) - schedule_work(&rdev->hotplug_work); + schedule_delayed_work(&rdev->hotplug_work, 0); if (queue_hdmi) schedule_work(&rdev->audio_work); if (rdev->msi_enabled) { diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 516ca27cfa12..6bc44c24e837 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -207,6 +207,9 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev, { u32 tmp; + /* Guess line buffer size to be 8192 pixels */ + u32 lb_size = 8192; + /* * Line Buffer Setup * There is a single line buffer shared by both display controllers. @@ -243,6 +246,13 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev, tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; } WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp); + + /* Save number of lines the linebuffer leads before the scanout */ + if (mode1) + rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay); + + if (mode2) + rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay); } struct rs690_watermark { diff --git a/drivers/gpu/drm/radeon/rv730_dpm.c b/drivers/gpu/drm/radeon/rv730_dpm.c index 3f5e1cf138ba..d37ba2cb886e 100644 --- a/drivers/gpu/drm/radeon/rv730_dpm.c +++ b/drivers/gpu/drm/radeon/rv730_dpm.c @@ -464,7 +464,7 @@ void rv730_stop_dpm(struct radeon_device *rdev) result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled); if (result != PPSMC_Result_OK) - DRM_ERROR("Could not force DPM to low\n"); + DRM_DEBUG("Could not force DPM to low\n"); WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index b9c770745a7a..e830c8935db0 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c @@ -193,7 +193,7 @@ void rv770_stop_dpm(struct radeon_device *rdev) result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled); if (result != PPSMC_Result_OK) - DRM_ERROR("Could not force DPM to low.\n"); + DRM_DEBUG("Could not force DPM to low.\n"); WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); @@ -1418,7 +1418,7 @@ int rv770_resume_smc(struct radeon_device *rdev) int rv770_set_sw_state(struct radeon_device *rdev) { if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) != PPSMC_Result_OK) - return -EINVAL; + DRM_DEBUG("rv770_set_sw_state failed\n"); return 0; } diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 07037e32dea3..f878d6962da5 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -2376,6 +2376,9 @@ static void dce6_program_watermarks(struct radeon_device *rdev, c.full = dfixed_div(c, a); priority_b_mark = dfixed_trunc(c); priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK; + + /* Save number of lines the linebuffer leads before the scanout */ + 
radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); } /* select wm A */ @@ -6848,7 +6851,7 @@ restart_ih: if (queue_dp) schedule_work(&rdev->dp_work); if (queue_hotplug) - schedule_work(&rdev->hotplug_work); + schedule_delayed_work(&rdev->hotplug_work, 0); if (queue_thermal && rdev->pm.dpm_enabled) schedule_work(&rdev->pm.dpm.thermal.work); rdev->ih.rptr = rptr; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index ca12e8ca5552..43bce69d8560 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -136,7 +136,7 @@ int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev, static struct drm_framebuffer * rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, - struct drm_mode_fb_cmd2 *mode_cmd) + const struct drm_mode_fb_cmd2 *mode_cmd) { struct rcar_du_device *rcdu = dev->dev_private; const struct rcar_du_format_info *format; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c index 002645bb5bbf..b8ac5911c102 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c @@ -72,7 +72,7 @@ static struct drm_framebuffer_funcs rockchip_drm_fb_funcs = { }; static struct rockchip_drm_fb * -rockchip_fb_alloc(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd, +rockchip_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **obj, unsigned int num_planes) { struct rockchip_drm_fb *rockchip_fb; @@ -102,7 +102,7 @@ rockchip_fb_alloc(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd, static struct drm_framebuffer * rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, - struct drm_mode_fb_cmd2 *mode_cmd) + const struct drm_mode_fb_cmd2 *mode_cmd) { struct rockchip_drm_fb *rockchip_fb; struct drm_gem_object *objs[ROCKCHIP_MAX_FB_BUFFER]; @@ -173,7 +173,7 @@ static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = { struct drm_framebuffer * rockchip_drm_framebuffer_init(struct drm_device *dev, - struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj) { struct rockchip_drm_fb *rockchip_fb; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h index 09574d48226f..2fe47f1ee98f 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h @@ -17,7 +17,7 @@ struct drm_framebuffer * rockchip_drm_framebuffer_init(struct drm_device *dev, - struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj); void rockchip_drm_framebuffer_fini(struct drm_framebuffer *fb); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index 8caea0a33dd8..d908321b94ce 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -67,6 +67,7 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). 
*/ vma->vm_flags &= ~VM_PFNMAP; + vma->vm_pgoff = 0; ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, obj->size, &rk_obj->dma_attrs); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 5d8ae5e49c44..03c47eeadc81 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -374,6 +374,7 @@ static const struct of_device_id vop_driver_dt_match[] = { .data = &rk3288_vop }, {}, }; +MODULE_DEVICE_TABLE(of, vop_driver_dt_match); static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v) { @@ -959,8 +960,8 @@ static int vop_update_plane_event(struct drm_plane *plane, val = (dest.y2 - dest.y1 - 1) << 16; val |= (dest.x2 - dest.x1 - 1) & 0xffff; VOP_WIN_SET(vop, win, dsp_info, val); - val = (dsp_sty - 1) << 16; - val |= (dsp_stx - 1) & 0xffff; + val = dsp_sty << 16; + val |= dsp_stx & 0xffff; VOP_WIN_SET(vop, win, dsp_st, val); VOP_WIN_SET(vop, win, rb_swap, rb_swap); @@ -1289,7 +1290,7 @@ static void vop_win_state_complete(struct vop_win *vop_win, if (state->event) { spin_lock_irqsave(&drm->event_lock, flags); - drm_send_vblank_event(drm, -1, state->event); + drm_crtc_send_vblank_event(crtc, state->event); spin_unlock_irqrestore(&drm->event_lock, flags); } @@ -1575,32 +1576,25 @@ static int vop_initial(struct vop *vop) return PTR_ERR(vop->dclk); } - ret = clk_prepare(vop->hclk); - if (ret < 0) { - dev_err(vop->dev, "failed to prepare hclk\n"); - return ret; - } - ret = clk_prepare(vop->dclk); if (ret < 0) { dev_err(vop->dev, "failed to prepare dclk\n"); - goto err_unprepare_hclk; + return ret; } - ret = clk_prepare(vop->aclk); + /* Enable both the hclk and aclk to setup the vop */ + ret = clk_prepare_enable(vop->hclk); if (ret < 0) { - dev_err(vop->dev, "failed to prepare aclk\n"); + dev_err(vop->dev, "failed to prepare/enable hclk\n"); goto err_unprepare_dclk; } - /* - * enable hclk, so that we can config vop register. - */ - ret = clk_enable(vop->hclk); + ret = clk_prepare_enable(vop->aclk); if (ret < 0) { - dev_err(vop->dev, "failed to prepare aclk\n"); - goto err_unprepare_aclk; + dev_err(vop->dev, "failed to prepare/enable aclk\n"); + goto err_disable_hclk; } + /* * do hclk_reset, reset all vop registers. 
*/ @@ -1608,7 +1602,7 @@ static int vop_initial(struct vop *vop) if (IS_ERR(ahb_rst)) { dev_err(vop->dev, "failed to get ahb reset\n"); ret = PTR_ERR(ahb_rst); - goto err_disable_hclk; + goto err_disable_aclk; } reset_control_assert(ahb_rst); usleep_range(10, 20); @@ -1634,26 +1628,25 @@ static int vop_initial(struct vop *vop) if (IS_ERR(vop->dclk_rst)) { dev_err(vop->dev, "failed to get dclk reset\n"); ret = PTR_ERR(vop->dclk_rst); - goto err_unprepare_aclk; + goto err_disable_aclk; } reset_control_assert(vop->dclk_rst); usleep_range(10, 20); reset_control_deassert(vop->dclk_rst); clk_disable(vop->hclk); + clk_disable(vop->aclk); vop->is_enabled = false; return 0; +err_disable_aclk: + clk_disable_unprepare(vop->aclk); err_disable_hclk: - clk_disable(vop->hclk); -err_unprepare_aclk: - clk_unprepare(vop->aclk); + clk_disable_unprepare(vop->hclk); err_unprepare_dclk: clk_unprepare(vop->dclk); -err_unprepare_hclk: - clk_unprepare(vop->hclk); return ret; } diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.c b/drivers/gpu/drm/shmobile/shmob_drm_kms.c index aaf98ace4a90..388a0fc13564 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_kms.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.c @@ -104,7 +104,7 @@ const struct shmob_drm_format_info *shmob_drm_format_info(u32 fourcc) static struct drm_framebuffer * shmob_drm_fb_create(struct drm_device *dev, struct drm_file *file_priv, - struct drm_mode_fb_cmd2 *mode_cmd) + const struct drm_mode_fb_cmd2 *mode_cmd) { const struct shmob_drm_format_info *format; diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig index 74d9d621453d..63ebb154b9b5 100644 --- a/drivers/gpu/drm/tegra/Kconfig +++ b/drivers/gpu/drm/tegra/Kconfig @@ -16,18 +16,6 @@ config DRM_TEGRA if DRM_TEGRA -config DRM_TEGRA_FBDEV - bool "Enable legacy fbdev support" - select DRM_KMS_FB_HELPER - select FB_SYS_FILLRECT - select FB_SYS_COPYAREA - select FB_SYS_IMAGEBLIT - default y - help - Choose this option if you have a need for the legacy fbdev support. - Note that this support also provides the Linux console on top of - the Tegra modesetting driver. 
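The driver-private DRM_TEGRA_FBDEV option removed above is superseded by the core DRM_FBDEV_EMULATION symbol, which the drm.c/drm.h/fb.c hunks below now test in their #ifdef guards. One common way to consume such a symbol is to pair the guarded code with an inline no-op stub — a sketch with a hypothetical demo_* name (tegra itself keeps the #ifdefs at each call site instead):

struct drm_device;	/* forward declaration is enough for the sketch */

#ifdef CONFIG_DRM_FBDEV_EMULATION
void demo_fbdev_init(struct drm_device *drm);	/* real fb_helper setup */
#else
static inline void demo_fbdev_init(struct drm_device *drm)
{
	/* fbdev emulation compiled out: nothing to do */
}
#endif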
- config DRM_TEGRA_DEBUG bool "NVIDIA Tegra DRM debug support" help diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 159ef515cab1..e0f827790a5e 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -106,7 +106,7 @@ static int tegra_atomic_commit(struct drm_device *drm, static const struct drm_mode_config_funcs tegra_drm_mode_funcs = { .fb_create = tegra_fb_create, -#ifdef CONFIG_DRM_TEGRA_FBDEV +#ifdef CONFIG_DRM_FBDEV_EMULATION .output_poll_changed = tegra_fb_output_poll_changed, #endif .atomic_check = drm_atomic_helper_check, @@ -260,7 +260,7 @@ static void tegra_drm_context_free(struct tegra_drm_context *context) static void tegra_drm_lastclose(struct drm_device *drm) { -#ifdef CONFIG_DRM_TEGRA_FBDEV +#ifdef CONFIG_DRM_FBDEV_EMULATION struct tegra_drm *tegra = drm->dev_private; tegra_fbdev_restore_mode(tegra->fbdev); diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index ec49275ffb24..d88a2d18c1a4 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -30,7 +30,7 @@ struct tegra_fb { unsigned int num_planes; }; -#ifdef CONFIG_DRM_TEGRA_FBDEV +#ifdef CONFIG_DRM_FBDEV_EMULATION struct tegra_fbdev { struct drm_fb_helper base; struct tegra_fb *fb; @@ -46,7 +46,7 @@ struct tegra_drm { struct mutex clients_lock; struct list_head clients; -#ifdef CONFIG_DRM_TEGRA_FBDEV +#ifdef CONFIG_DRM_FBDEV_EMULATION struct tegra_fbdev *fbdev; #endif @@ -268,12 +268,12 @@ int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer, struct tegra_bo_tiling *tiling); struct drm_framebuffer *tegra_fb_create(struct drm_device *drm, struct drm_file *file, - struct drm_mode_fb_cmd2 *cmd); + const struct drm_mode_fb_cmd2 *cmd); int tegra_drm_fb_prepare(struct drm_device *drm); void tegra_drm_fb_free(struct drm_device *drm); int tegra_drm_fb_init(struct drm_device *drm); void tegra_drm_fb_exit(struct drm_device *drm); -#ifdef CONFIG_DRM_TEGRA_FBDEV +#ifdef CONFIG_DRM_FBDEV_EMULATION void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev); void tegra_fb_output_poll_changed(struct drm_device *drm); #endif diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c index 1004075fd088..ede9e94f3312 100644 --- a/drivers/gpu/drm/tegra/fb.c +++ b/drivers/gpu/drm/tegra/fb.c @@ -18,7 +18,7 @@ static inline struct tegra_fb *to_tegra_fb(struct drm_framebuffer *fb) return container_of(fb, struct tegra_fb, base); } -#ifdef CONFIG_DRM_TEGRA_FBDEV +#ifdef CONFIG_DRM_FBDEV_EMULATION static inline struct tegra_fbdev *to_tegra_fbdev(struct drm_fb_helper *helper) { return container_of(helper, struct tegra_fbdev, base); @@ -92,7 +92,7 @@ static struct drm_framebuffer_funcs tegra_fb_funcs = { }; static struct tegra_fb *tegra_fb_alloc(struct drm_device *drm, - struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct tegra_bo **planes, unsigned int num_planes) { @@ -131,7 +131,7 @@ static struct tegra_fb *tegra_fb_alloc(struct drm_device *drm, struct drm_framebuffer *tegra_fb_create(struct drm_device *drm, struct drm_file *file, - struct drm_mode_fb_cmd2 *cmd) + const struct drm_mode_fb_cmd2 *cmd) { unsigned int hsub, vsub, i; struct tegra_bo *planes[4]; @@ -181,7 +181,7 @@ unreference: return ERR_PTR(err); } -#ifdef CONFIG_DRM_TEGRA_FBDEV +#ifdef CONFIG_DRM_FBDEV_EMULATION static struct fb_ops tegra_fb_ops = { .owner = THIS_MODULE, .fb_fillrect = drm_fb_helper_sys_fillrect, @@ -370,7 +370,7 @@ void tegra_fb_output_poll_changed(struct drm_device *drm) int tegra_drm_fb_prepare(struct drm_device *drm) { 
-#ifdef CONFIG_DRM_TEGRA_FBDEV +#ifdef CONFIG_DRM_FBDEV_EMULATION struct tegra_drm *tegra = drm->dev_private; tegra->fbdev = tegra_fbdev_create(drm); @@ -383,7 +383,7 @@ int tegra_drm_fb_prepare(struct drm_device *drm) void tegra_drm_fb_free(struct drm_device *drm) { -#ifdef CONFIG_DRM_TEGRA_FBDEV +#ifdef CONFIG_DRM_FBDEV_EMULATION struct tegra_drm *tegra = drm->dev_private; tegra_fbdev_free(tegra->fbdev); @@ -392,7 +392,7 @@ void tegra_drm_fb_free(struct drm_device *drm) int tegra_drm_fb_init(struct drm_device *drm) { -#ifdef CONFIG_DRM_TEGRA_FBDEV +#ifdef CONFIG_DRM_FBDEV_EMULATION struct tegra_drm *tegra = drm->dev_private; int err; @@ -407,7 +407,7 @@ int tegra_drm_fb_init(struct drm_device *drm) void tegra_drm_fb_exit(struct drm_device *drm) { -#ifdef CONFIG_DRM_TEGRA_FBDEV +#ifdef CONFIG_DRM_FBDEV_EMULATION struct tegra_drm *tegra = drm->dev_private; tegra_fbdev_exit(tegra->fbdev); diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index 876cad58b1f9..4ddb21e7f52f 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -46,7 +46,7 @@ void tilcdc_module_cleanup(struct tilcdc_module *mod) static struct of_device_id tilcdc_of_match[]; static struct drm_framebuffer *tilcdc_fb_create(struct drm_device *dev, - struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd) + struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd) { return drm_fb_cma_create(dev, file_priv, mode_cmd); } diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h index 80adbac82bde..4a064efcea58 100644 --- a/drivers/gpu/drm/udl/udl_drv.h +++ b/drivers/gpu/drm/udl/udl_drv.h @@ -108,7 +108,7 @@ void udl_fbdev_unplug(struct drm_device *dev); struct drm_framebuffer * udl_fb_user_fb_create(struct drm_device *dev, struct drm_file *file, - struct drm_mode_fb_cmd2 *mode_cmd); + const struct drm_mode_fb_cmd2 *mode_cmd); int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr, const char *front, char **urb_buf_ptr, diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index 62c7b1dafaa4..200419d4d43c 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c @@ -33,7 +33,6 @@ module_param(fb_defio, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP); struct udl_fbdev { struct drm_fb_helper helper; struct udl_framebuffer ufb; - struct list_head fbdev_list; int fb_count; }; @@ -456,7 +455,7 @@ static const struct drm_framebuffer_funcs udlfb_funcs = { static int udl_framebuffer_init(struct drm_device *dev, struct udl_framebuffer *ufb, - struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct udl_gem_object *obj) { int ret; @@ -624,7 +623,7 @@ void udl_fbdev_unplug(struct drm_device *dev) struct drm_framebuffer * udl_fb_user_fb_create(struct drm_device *dev, struct drm_file *file, - struct drm_mode_fb_cmd2 *mode_cmd) + const struct drm_mode_fb_cmd2 *mode_cmd) { struct drm_gem_object *obj; struct udl_framebuffer *ufb; diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index f545913a56c7..8e6044d7660a 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -215,7 +215,7 @@ static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = { int virtio_gpu_framebuffer_init(struct drm_device *dev, struct virtio_gpu_framebuffer *vgfb, - struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj) { int ret; @@ -412,7 
+412,7 @@ static const struct drm_connector_funcs virtio_gpu_connector_funcs = { .save = virtio_gpu_conn_save, .restore = virtio_gpu_conn_restore, .detect = virtio_gpu_conn_detect, - .fill_modes = drm_helper_probe_single_connector_modes, + .fill_modes = drm_helper_probe_single_connector_modes_nomerge, .destroy = virtio_gpu_conn_destroy, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, @@ -465,7 +465,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index) static struct drm_framebuffer * virtio_gpu_user_framebuffer_create(struct drm_device *dev, struct drm_file *file_priv, - struct drm_mode_fb_cmd2 *mode_cmd) + const struct drm_mode_fb_cmd2 *mode_cmd) { struct drm_gem_object *obj = NULL; struct virtio_gpu_framebuffer *virtio_gpu_fb; diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index 79f0abe69b64..8f486f4c7023 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -328,7 +328,7 @@ void virtio_gpu_dequeue_fence_func(struct work_struct *work); /* virtio_gpu_display.c */ int virtio_gpu_framebuffer_init(struct drm_device *dev, struct virtio_gpu_framebuffer *vgfb, - struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj); int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev); void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev); diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c index 6a81e084593b..2242a80866a9 100644 --- a/drivers/gpu/drm/virtio/virtgpu_fb.c +++ b/drivers/gpu/drm/virtio/virtgpu_fb.c @@ -32,7 +32,6 @@ struct virtio_gpu_fbdev { struct drm_fb_helper helper; struct virtio_gpu_framebuffer vgfb; - struct list_head fbdev_list; struct virtio_gpu_device *vgdev; struct delayed_work work; }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 9fcd7f82995c..e38db35132ed 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -930,7 +930,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv, static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, struct drm_file *file_priv, - struct drm_mode_fb_cmd2 *mode_cmd2) + const struct drm_mode_fb_cmd2 *mode_cmd2) { struct vmw_private *dev_priv = vmw_priv(dev); struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index ba47b30d28fa..f2e13eb8339f 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -28,6 +28,7 @@ #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/of_device.h> +#include <linux/of_graph.h> #include <drm/drm_fourcc.h> @@ -993,12 +994,26 @@ static void platform_device_unregister_children(struct platform_device *pdev) struct ipu_platform_reg { struct ipu_client_platformdata pdata; const char *name; - int reg_offset; }; +/* These must be in the order of the corresponding device tree port nodes */ static const struct ipu_platform_reg client_reg[] = { { .pdata = { + .csi = 0, + .dma[0] = IPUV3_CHANNEL_CSI0, + .dma[1] = -EINVAL, + }, + .name = "imx-ipuv3-camera", + }, { + .pdata = { + .csi = 1, + .dma[0] = IPUV3_CHANNEL_CSI1, + .dma[1] = -EINVAL, + }, + .name = "imx-ipuv3-camera", + }, { + .pdata = { .di = 0, .dc = 5, .dp = IPU_DP_FLOW_SYNC_BG, @@ -1015,22 +1030,6 @@ static const struct ipu_platform_reg client_reg[] = { .dma[1] 
= -EINVAL, }, .name = "imx-ipuv3-crtc", - }, { - .pdata = { - .csi = 0, - .dma[0] = IPUV3_CHANNEL_CSI0, - .dma[1] = -EINVAL, - }, - .reg_offset = IPU_CM_CSI0_REG_OFS, - .name = "imx-ipuv3-camera", - }, { - .pdata = { - .csi = 1, - .dma[0] = IPUV3_CHANNEL_CSI1, - .dma[1] = -EINVAL, - }, - .reg_offset = IPU_CM_CSI1_REG_OFS, - .name = "imx-ipuv3-camera", }, }; @@ -1051,22 +1050,30 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base) for (i = 0; i < ARRAY_SIZE(client_reg); i++) { const struct ipu_platform_reg *reg = &client_reg[i]; struct platform_device *pdev; - struct resource res; - - if (reg->reg_offset) { - memset(&res, 0, sizeof(res)); - res.flags = IORESOURCE_MEM; - res.start = ipu_base + ipu->devtype->cm_ofs + reg->reg_offset; - res.end = res.start + PAGE_SIZE - 1; - pdev = platform_device_register_resndata(dev, reg->name, - id++, &res, 1, ®->pdata, sizeof(reg->pdata)); - } else { - pdev = platform_device_register_data(dev, reg->name, - id++, ®->pdata, sizeof(reg->pdata)); + + pdev = platform_device_alloc(reg->name, id++); + if (!pdev) { + ret = -ENOMEM; + goto err_register; + } + + pdev->dev.parent = dev; + + /* Associate subdevice with the corresponding port node */ + pdev->dev.of_node = of_graph_get_port_by_id(dev->of_node, i); + if (!pdev->dev.of_node) { + dev_err(dev, "missing port@%d node in %s\n", i, + dev->of_node->full_name); + ret = -ENODEV; + goto err_register; } - if (IS_ERR(pdev)) { - ret = PTR_ERR(pdev); + ret = platform_device_add_data(pdev, ®->pdata, + sizeof(reg->pdata)); + if (!ret) + ret = platform_device_add(pdev); + if (ret) { + platform_device_put(pdev); goto err_register; } } diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index ac1feea51be3..9024a3de4032 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -609,6 +609,7 @@ #define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110 #define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f #define USB_DEVICE_ID_LOGITECH_HARMONY_PS3 0x0306 +#define USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS 0xc24d #define USB_DEVICE_ID_LOGITECH_MOUSE_C01A 0xc01a #define USB_DEVICE_ID_LOGITECH_MOUSE_C05A 0xc05a #define USB_DEVICE_ID_LOGITECH_MOUSE_C06A 0xc06a diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c index c20ac76c0a8c..c690fae02cf8 100644 --- a/drivers/hid/hid-lg.c +++ b/drivers/hid/hid-lg.c @@ -665,8 +665,9 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id) struct lg_drv_data *drv_data; int ret; - /* Only work with the 1st interface (G29 presents multiple) */ - if (iface_num != 0) { + /* G29 only work with the 1st interface */ + if ((hdev->product == USB_DEVICE_ID_LOGITECH_G29_WHEEL) && + (iface_num != 0)) { dbg_hid("%s: ignoring ifnum %d\n", __func__, iface_num); return -ENODEV; } diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 94bb137abe32..2324520b006d 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -84,6 +84,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL }, + { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS, HID_QUIRK_NOGET }, { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C01A, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL }, 
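/* On the G710+ entry added above: HID_QUIRK_NOGET makes usbhid skip its
 * initial GET_REPORT polling of the device at probe time; the usual
 * motivation (assumed here, the patch itself does not say) is a device
 * that stalls on that request and fails to enumerate without the quirk.
 */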
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL }, diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c index b33f53b3ca93..bf04d2a3cf4a 100644 --- a/drivers/isdn/hisax/config.c +++ b/drivers/isdn/hisax/config.c @@ -1896,7 +1896,7 @@ static void EChannel_proc_rcv(struct hisax_d_if *d_if) ptr--; *ptr++ = '\n'; *ptr = 0; - HiSax_putstatus(cs, NULL, "%s", cs->dlog); + HiSax_putstatus(cs, NULL, cs->dlog); } else HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c index 4a4825528188..90449e1e91e5 100644 --- a/drivers/isdn/hisax/hfc_pci.c +++ b/drivers/isdn/hisax/hfc_pci.c @@ -901,7 +901,7 @@ Begin: ptr--; *ptr++ = '\n'; *ptr = 0; - HiSax_putstatus(cs, NULL, "%s", cs->dlog); + HiSax_putstatus(cs, NULL, cs->dlog); } else HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", total - 3); } diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c index b1fad81f0722..13b2151c10f5 100644 --- a/drivers/isdn/hisax/hfc_sx.c +++ b/drivers/isdn/hisax/hfc_sx.c @@ -674,7 +674,7 @@ receive_emsg(struct IsdnCardState *cs) ptr--; *ptr++ = '\n'; *ptr = 0; - HiSax_putstatus(cs, NULL, "%s", cs->dlog); + HiSax_putstatus(cs, NULL, cs->dlog); } else HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", skb->len); } diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c index b420f8bd862e..ba4beb25d872 100644 --- a/drivers/isdn/hisax/q931.c +++ b/drivers/isdn/hisax/q931.c @@ -1179,7 +1179,7 @@ LogFrame(struct IsdnCardState *cs, u_char *buf, int size) dp--; *dp++ = '\n'; *dp = 0; - HiSax_putstatus(cs, NULL, "%s", cs->dlog); + HiSax_putstatus(cs, NULL, cs->dlog); } else HiSax_putstatus(cs, "LogFrame: ", "warning Frame too big (%d)", size); } @@ -1246,7 +1246,7 @@ dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir) } if (finish) { *dp = 0; - HiSax_putstatus(cs, NULL, "%s", cs->dlog); + HiSax_putstatus(cs, NULL, cs->dlog); return; } if ((0xfe & buf[0]) == PROTO_DIS_N0) { /* 1TR6 */ @@ -1509,5 +1509,5 @@ dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir) dp += sprintf(dp, "Unknown protocol %x!", buf[0]); } *dp = 0; - HiSax_putstatus(cs, NULL, "%s", cs->dlog); + HiSax_putstatus(cs, NULL, cs->dlog); } diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index f659e605a406..86ce887b2ed6 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -123,6 +123,26 @@ void nvm_unregister_mgr(struct nvmm_type *mt) } EXPORT_SYMBOL(nvm_unregister_mgr); +/* register with device with a supported manager */ +static int register_mgr(struct nvm_dev *dev) +{ + struct nvmm_type *mt; + int ret = 0; + + list_for_each_entry(mt, &nvm_mgrs, list) { + ret = mt->register_mgr(dev); + if (ret > 0) { + dev->mt = mt; + break; /* successfully initialized */ + } + } + + if (!ret) + pr_info("nvm: no compatible nvm manager found.\n"); + + return ret; +} + static struct nvm_dev *nvm_find_nvm_dev(const char *name) { struct nvm_dev *dev; @@ -160,11 +180,6 @@ int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk) } EXPORT_SYMBOL(nvm_erase_blk); -static void nvm_core_free(struct nvm_dev *dev) -{ - kfree(dev); -} - static int nvm_core_init(struct nvm_dev *dev) { struct nvm_id *id = &dev->identity; @@ -179,12 +194,21 @@ static int nvm_core_init(struct nvm_dev *dev) dev->sec_size = grp->csecs; dev->oob_size = grp->sos; dev->sec_per_pg = grp->fpg_sz / grp->csecs; - dev->addr_mode = id->ppat; - dev->addr_format = id->ppaf; + 
memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format)); dev->plane_mode = NVM_PLANE_SINGLE; dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size; + if (grp->mtype != 0) { + pr_err("nvm: memory type not supported\n"); + return -EINVAL; + } + + if (grp->fmtype != 0 && grp->fmtype != 1) { + pr_err("nvm: flash type not supported\n"); + return -EINVAL; + } + if (grp->mpos & 0x020202) dev->plane_mode = NVM_PLANE_DOUBLE; if (grp->mpos & 0x040404) @@ -213,21 +237,17 @@ static void nvm_free(struct nvm_dev *dev) if (dev->mt) dev->mt->unregister_mgr(dev); - - nvm_core_free(dev); } static int nvm_init(struct nvm_dev *dev) { - struct nvmm_type *mt; - int ret = 0; + int ret = -EINVAL; if (!dev->q || !dev->ops) - return -EINVAL; + return ret; if (dev->ops->identity(dev->q, &dev->identity)) { pr_err("nvm: device could not be identified\n"); - ret = -EINVAL; goto err; } @@ -251,21 +271,13 @@ static int nvm_init(struct nvm_dev *dev) goto err; } - /* register with device with a supported manager */ - list_for_each_entry(mt, &nvm_mgrs, list) { - ret = mt->register_mgr(dev); - if (ret < 0) - goto err; /* initialization failed */ - if (ret > 0) { - dev->mt = mt; - break; /* successfully initialized */ - } - } - - if (!ret) { - pr_info("nvm: no compatible manager found.\n"); + down_write(&nvm_lock); + ret = register_mgr(dev); + up_write(&nvm_lock); + if (ret < 0) + goto err; + if (!ret) return 0; - } pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n", dev->name, dev->sec_per_pg, dev->nr_planes, @@ -273,7 +285,6 @@ static int nvm_init(struct nvm_dev *dev) dev->nr_chnls); return 0; err: - nvm_free(dev); pr_err("nvm: failed to initialize nvm\n"); return ret; } @@ -308,22 +319,26 @@ int nvm_register(struct request_queue *q, char *disk_name, if (ret) goto err_init; - down_write(&nvm_lock); - list_add(&dev->devices, &nvm_devices); - up_write(&nvm_lock); + if (dev->ops->max_phys_sect > 256) { + pr_info("nvm: max sectors supported is 256.\n"); + ret = -EINVAL; + goto err_init; + } if (dev->ops->max_phys_sect > 1) { dev->ppalist_pool = dev->ops->create_dma_pool(dev->q, "ppalist"); if (!dev->ppalist_pool) { pr_err("nvm: could not create ppa pool\n"); - return -ENOMEM; + ret = -ENOMEM; + goto err_init; } - } else if (dev->ops->max_phys_sect > 256) { - pr_info("nvm: max sectors supported is 256.\n"); - return -EINVAL; } + down_write(&nvm_lock); + list_add(&dev->devices, &nvm_devices); + up_write(&nvm_lock); + return 0; err_init: kfree(dev); @@ -333,19 +348,22 @@ EXPORT_SYMBOL(nvm_register); void nvm_unregister(char *disk_name) { - struct nvm_dev *dev = nvm_find_nvm_dev(disk_name); + struct nvm_dev *dev; + down_write(&nvm_lock); + dev = nvm_find_nvm_dev(disk_name); if (!dev) { pr_err("nvm: could not find device %s to unregister\n", disk_name); + up_write(&nvm_lock); return; } - nvm_exit(dev); - - down_write(&nvm_lock); list_del(&dev->devices); up_write(&nvm_lock); + + nvm_exit(dev); + kfree(dev); } EXPORT_SYMBOL(nvm_unregister); @@ -358,38 +376,30 @@ static int nvm_create_target(struct nvm_dev *dev, { struct nvm_ioctl_create_simple *s = &create->conf.s; struct request_queue *tqueue; - struct nvmm_type *mt; struct gendisk *tdisk; struct nvm_tgt_type *tt; struct nvm_target *t; void *targetdata; int ret = 0; + down_write(&nvm_lock); if (!dev->mt) { - /* register with device with a supported NVM manager */ - list_for_each_entry(mt, &nvm_mgrs, list) { - ret = mt->register_mgr(dev); - if (ret < 0) - return ret; /* initialization failed */ - if (ret > 0) { - dev->mt = mt; - break; /* successfully initialized */ - } - } 
- - if (!ret) { - pr_info("nvm: no compatible nvm manager found.\n"); - return -ENODEV; + ret = register_mgr(dev); + if (!ret) + ret = -ENODEV; + if (ret < 0) { + up_write(&nvm_lock); + return ret; } } tt = nvm_find_target_type(create->tgttype); if (!tt) { pr_err("nvm: target type %s not found\n", create->tgttype); + up_write(&nvm_lock); return -EINVAL; } - down_write(&nvm_lock); list_for_each_entry(t, &dev->online_targets, list) { if (!strcmp(create->tgtname, t->disk->disk_name)) { pr_err("nvm: target name already exists.\n"); @@ -457,11 +467,11 @@ static void nvm_remove_target(struct nvm_target *t) lockdep_assert_held(&nvm_lock); del_gendisk(tdisk); + blk_cleanup_queue(q); + if (tt->exit) tt->exit(tdisk->private_data); - blk_cleanup_queue(q); - put_disk(tdisk); list_del(&t->list); @@ -473,7 +483,9 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create) struct nvm_dev *dev; struct nvm_ioctl_create_simple *s; + down_write(&nvm_lock); dev = nvm_find_nvm_dev(create->dev); + up_write(&nvm_lock); if (!dev) { pr_err("nvm: device not found\n"); return -EINVAL; @@ -532,7 +544,9 @@ static int nvm_configure_show(const char *val) return -EINVAL; } + down_write(&nvm_lock); dev = nvm_find_nvm_dev(devname); + up_write(&nvm_lock); if (!dev) { pr_err("nvm: device not found\n"); return -EINVAL; @@ -541,7 +555,7 @@ static int nvm_configure_show(const char *val) if (!dev->mt) return 0; - dev->mt->free_blocks_print(dev); + dev->mt->lun_info_print(dev); return 0; } @@ -677,8 +691,10 @@ static long nvm_ioctl_info(struct file *file, void __user *arg) info->tgtsize = tgt_iter; up_write(&nvm_lock); - if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) + if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) { + kfree(info); return -EFAULT; + } kfree(info); return 0; @@ -721,8 +737,11 @@ static long nvm_ioctl_get_devices(struct file *file, void __user *arg) devices->nr_devices = i; - if (copy_to_user(arg, devices, sizeof(struct nvm_ioctl_get_devices))) + if (copy_to_user(arg, devices, + sizeof(struct nvm_ioctl_get_devices))) { + kfree(devices); return -EFAULT; + } kfree(devices); return 0; diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c index ae1fb2bdc5f4..35dde84b71e9 100644 --- a/drivers/lightnvm/gennvm.c +++ b/drivers/lightnvm/gennvm.c @@ -60,23 +60,27 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn) lun->vlun.lun_id = i % dev->luns_per_chnl; lun->vlun.chnl_id = i / dev->luns_per_chnl; lun->vlun.nr_free_blocks = dev->blks_per_lun; + lun->vlun.nr_inuse_blocks = 0; + lun->vlun.nr_bad_blocks = 0; } return 0; } -static int gennvm_block_bb(u32 lun_id, void *bb_bitmap, unsigned int nr_blocks, +static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks, void *private) { struct gen_nvm *gn = private; - struct gen_lun *lun = &gn->luns[lun_id]; + struct nvm_dev *dev = gn->dev; + struct gen_lun *lun; struct nvm_block *blk; int i; - if (unlikely(bitmap_empty(bb_bitmap, nr_blocks))) - return 0; + lun = &gn->luns[(dev->nr_luns * ppa.g.ch) + ppa.g.lun]; + + for (i = 0; i < nr_blocks; i++) { + if (blks[i] == 0) + continue; - i = -1; - while ((i = find_next_bit(bb_bitmap, nr_blocks, i + 1)) < nr_blocks) { blk = &lun->vlun.blocks[i]; if (!blk) { pr_err("gennvm: BB data is out of bounds.\n"); @@ -84,6 +88,7 @@ static int gennvm_block_bb(u32 lun_id, void *bb_bitmap, unsigned int nr_blocks, } list_move_tail(&blk->list, &lun->bb_list); + lun->vlun.nr_bad_blocks++; } return 0; @@ -136,6 +141,7 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 
*entries, void *private) list_move_tail(&blk->list, &lun->used_list); blk->type = 1; lun->vlun.nr_free_blocks--; + lun->vlun.nr_inuse_blocks++; } } @@ -164,15 +170,25 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn) block->id = cur_block_id++; /* First block is reserved for device */ - if (unlikely(lun_iter == 0 && blk_iter == 0)) + if (unlikely(lun_iter == 0 && blk_iter == 0)) { + lun->vlun.nr_free_blocks--; continue; + } list_add_tail(&block->list, &lun->free_list); } if (dev->ops->get_bb_tbl) { - ret = dev->ops->get_bb_tbl(dev->q, lun->vlun.id, - dev->blks_per_lun, gennvm_block_bb, gn); + struct ppa_addr ppa; + + ppa.ppa = 0; + ppa.g.ch = lun->vlun.chnl_id; + ppa.g.lun = lun->vlun.id; + ppa = generic_to_dev_addr(dev, ppa); + + ret = dev->ops->get_bb_tbl(dev, ppa, + dev->blks_per_lun, + gennvm_block_bb, gn); if (ret) pr_err("gennvm: could not read BB table\n"); } @@ -190,6 +206,14 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn) return 0; } +static void gennvm_free(struct nvm_dev *dev) +{ + gennvm_blocks_free(dev); + gennvm_luns_free(dev); + kfree(dev->mp); + dev->mp = NULL; +} + static int gennvm_register(struct nvm_dev *dev) { struct gen_nvm *gn; @@ -199,6 +223,7 @@ static int gennvm_register(struct nvm_dev *dev) if (!gn) return -ENOMEM; + gn->dev = dev; gn->nr_luns = dev->nr_luns; dev->mp = gn; @@ -216,16 +241,13 @@ static int gennvm_register(struct nvm_dev *dev) return 1; err: - kfree(gn); + gennvm_free(dev); return ret; } static void gennvm_unregister(struct nvm_dev *dev) { - gennvm_blocks_free(dev); - gennvm_luns_free(dev); - kfree(dev->mp); - dev->mp = NULL; + gennvm_free(dev); } static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev, @@ -254,6 +276,7 @@ static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev, blk->type = 1; lun->vlun.nr_free_blocks--; + lun->vlun.nr_inuse_blocks++; spin_unlock(&vlun->lock); out: @@ -271,16 +294,21 @@ static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk) case 1: list_move_tail(&blk->list, &lun->free_list); lun->vlun.nr_free_blocks++; + lun->vlun.nr_inuse_blocks--; blk->type = 0; break; case 2: list_move_tail(&blk->list, &lun->bb_list); + lun->vlun.nr_bad_blocks++; + lun->vlun.nr_inuse_blocks--; break; default: WARN_ON_ONCE(1); pr_err("gennvm: erroneous block type (%lu -> %u)\n", blk->id, blk->type); list_move_tail(&blk->list, &lun->bb_list); + lun->vlun.nr_bad_blocks++; + lun->vlun.nr_inuse_blocks--; } spin_unlock(&vlun->lock); @@ -292,10 +320,10 @@ static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd) if (rqd->nr_pages > 1) { for (i = 0; i < rqd->nr_pages; i++) - rqd->ppa_list[i] = addr_to_generic_mode(dev, + rqd->ppa_list[i] = dev_to_generic_addr(dev, rqd->ppa_list[i]); } else { - rqd->ppa_addr = addr_to_generic_mode(dev, rqd->ppa_addr); + rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr); } } @@ -305,10 +333,10 @@ static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd) if (rqd->nr_pages > 1) { for (i = 0; i < rqd->nr_pages; i++) - rqd->ppa_list[i] = generic_to_addr_mode(dev, + rqd->ppa_list[i] = generic_to_dev_addr(dev, rqd->ppa_list[i]); } else { - rqd->ppa_addr = generic_to_addr_mode(dev, rqd->ppa_addr); + rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr); } } @@ -354,10 +382,10 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd) { int i; - if (!dev->ops->set_bb) + if (!dev->ops->set_bb_tbl) return; - if (dev->ops->set_bb(dev->q, rqd, 1)) + if (dev->ops->set_bb_tbl(dev->q, rqd, 1)) 
return; gennvm_addr_to_generic_mode(dev, rqd); @@ -440,15 +468,24 @@ static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid) return &gn->luns[lunid].vlun; } -static void gennvm_free_blocks_print(struct nvm_dev *dev) +static void gennvm_lun_info_print(struct nvm_dev *dev) { struct gen_nvm *gn = dev->mp; struct gen_lun *lun; unsigned int i; - gennvm_for_each_lun(gn, lun, i) - pr_info("%s: lun%8u\t%u\n", - dev->name, i, lun->vlun.nr_free_blocks); + + gennvm_for_each_lun(gn, lun, i) { + spin_lock(&lun->vlun.lock); + + pr_info("%s: lun%8u\t%u\t%u\t%u\n", + dev->name, i, + lun->vlun.nr_free_blocks, + lun->vlun.nr_inuse_blocks, + lun->vlun.nr_bad_blocks); + + spin_unlock(&lun->vlun.lock); + } } static struct nvmm_type gennvm = { @@ -466,7 +503,7 @@ static struct nvmm_type gennvm = { .erase_blk = gennvm_erase_blk, .get_lun = gennvm_get_lun, - .free_blocks_print = gennvm_free_blocks_print, + .lun_info_print = gennvm_lun_info_print, }; static int __init gennvm_module_init(void) diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h index d23bd3501ddc..9c24b5b32dac 100644 --- a/drivers/lightnvm/gennvm.h +++ b/drivers/lightnvm/gennvm.h @@ -35,6 +35,8 @@ struct gen_lun { }; struct gen_nvm { + struct nvm_dev *dev; + int nr_luns; struct gen_lun *luns; }; diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index 7ba64c87ba1c..75e59c3a3f96 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c @@ -123,12 +123,42 @@ static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk) return blk->id * rrpc->dev->pgs_per_blk; } +static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev, + struct ppa_addr r) +{ + struct ppa_addr l; + int secs, pgs, blks, luns; + sector_t ppa = r.ppa; + + l.ppa = 0; + + div_u64_rem(ppa, dev->sec_per_pg, &secs); + l.g.sec = secs; + + sector_div(ppa, dev->sec_per_pg); + div_u64_rem(ppa, dev->sec_per_blk, &pgs); + l.g.pg = pgs; + + sector_div(ppa, dev->pgs_per_blk); + div_u64_rem(ppa, dev->blks_per_lun, &blks); + l.g.blk = blks; + + sector_div(ppa, dev->blks_per_lun); + div_u64_rem(ppa, dev->luns_per_chnl, &luns); + l.g.lun = luns; + + sector_div(ppa, dev->luns_per_chnl); + l.g.ch = ppa; + + return l; +} + static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr) { struct ppa_addr paddr; paddr.ppa = addr; - return __linear_to_generic_addr(dev, paddr); + return linear_to_generic_addr(dev, paddr); } /* requires lun->lock taken */ diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 917d47e290ae..3147c8d09ea8 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -112,7 +112,8 @@ struct iv_tcw_private { * and encrypts / decrypts at the same time. */ enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, - DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD }; + DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD, + DM_CRYPT_EXIT_THREAD}; /* * The fields in here must be read only after initialization. 
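The DM_CRYPT_EXIT_THREAD flag added just above lets the hunks that follow replace the writer thread's kthread_should_stop() test with a flag that is set and tested under write_thread_wait.lock, closing the window where a stop request could slip in between the test and schedule(). Below is a minimal sketch of that shutdown pattern, assuming kernel context; the names (writer_ctx, WRITER_EXIT_THREAD, writer_stop) are illustrative, not dm-crypt's own:

#include <linux/bitops.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct writer_ctx {
	wait_queue_head_t wq;	/* wq.lock also guards the exit flag */
	unsigned long flags;
	struct task_struct *thread;
};
#define WRITER_EXIT_THREAD 0

static int writer_thread(void *data)
{
	struct writer_ctx *cc = data;

	while (1) {
		DECLARE_WAITQUEUE(wait, current);

		spin_lock_irq(&cc->wq.lock);
		/* tested under the same lock the waker takes, so the
		 * stop request cannot arrive unseen before we sleep */
		if (test_bit(WRITER_EXIT_THREAD, &cc->flags)) {
			spin_unlock_irq(&cc->wq.lock);
			break;
		}
		__set_current_state(TASK_INTERRUPTIBLE);
		__add_wait_queue(&cc->wq, &wait);
		spin_unlock_irq(&cc->wq.lock);

		schedule();

		spin_lock_irq(&cc->wq.lock);
		__remove_wait_queue(&cc->wq, &wait);
		spin_unlock_irq(&cc->wq.lock);
	}
	return 0;
}

static void writer_stop(struct writer_ctx *cc)
{
	spin_lock_irq(&cc->wq.lock);
	set_bit(WRITER_EXIT_THREAD, &cc->flags);
	wake_up_locked(&cc->wq);	/* must be called with wq.lock held */
	spin_unlock_irq(&cc->wq.lock);
	kthread_stop(cc->thread);	/* reap the thread once it breaks out */
}

The crypt_dtr() hunk further below performs exactly this set-flag, wake-locked, then kthread_stop() sequence.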
@@ -1203,20 +1204,18 @@ continue_locked: if (!RB_EMPTY_ROOT(&cc->write_tree)) goto pop_from_list; + if (unlikely(test_bit(DM_CRYPT_EXIT_THREAD, &cc->flags))) { + spin_unlock_irq(&cc->write_thread_wait.lock); + break; + } + __set_current_state(TASK_INTERRUPTIBLE); __add_wait_queue(&cc->write_thread_wait, &wait); spin_unlock_irq(&cc->write_thread_wait.lock); - if (unlikely(kthread_should_stop())) { - set_task_state(current, TASK_RUNNING); - remove_wait_queue(&cc->write_thread_wait, &wait); - break; - } - schedule(); - set_task_state(current, TASK_RUNNING); spin_lock_irq(&cc->write_thread_wait.lock); __remove_wait_queue(&cc->write_thread_wait, &wait); goto continue_locked; @@ -1531,8 +1530,13 @@ static void crypt_dtr(struct dm_target *ti) if (!cc) return; - if (cc->write_thread) + if (cc->write_thread) { + spin_lock_irq(&cc->write_thread_wait.lock); + set_bit(DM_CRYPT_EXIT_THREAD, &cc->flags); + wake_up_locked(&cc->write_thread_wait); + spin_unlock_irq(&cc->write_thread_wait.lock); kthread_stop(cc->write_thread); + } if (cc->io_queue) destroy_workqueue(cc->io_queue); diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index aaa6caa46a9f..cfa29f574c2a 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -1537,32 +1537,34 @@ static int multipath_prepare_ioctl(struct dm_target *ti, struct block_device **bdev, fmode_t *mode) { struct multipath *m = ti->private; - struct pgpath *pgpath; unsigned long flags; int r; - r = 0; - spin_lock_irqsave(&m->lock, flags); if (!m->current_pgpath) __choose_pgpath(m, 0); - pgpath = m->current_pgpath; - - if (pgpath) { - *bdev = pgpath->path.dev->bdev; - *mode = pgpath->path.dev->mode; + if (m->current_pgpath) { + if (!m->queue_io) { + *bdev = m->current_pgpath->path.dev->bdev; + *mode = m->current_pgpath->path.dev->mode; + r = 0; + } else { + /* pg_init has not started or completed */ + r = -ENOTCONN; + } + } else { + /* No path is available */ + if (m->queue_if_no_path) + r = -ENOTCONN; + else + r = -EIO; } - if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path)) - r = -ENOTCONN; - else if (!*bdev) - r = -EIO; - spin_unlock_irqrestore(&m->lock, flags); - if (r == -ENOTCONN && !fatal_signal_pending(current)) { + if (r == -ENOTCONN) { spin_lock_irqsave(&m->lock, flags); if (!m->current_pg) { /* Path status changed, redo selection */ diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 3897b90bd462..63903a5a5d9e 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -2432,6 +2432,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) case PM_WRITE: if (old_mode != new_mode) notify_of_pool_mode_change(pool, "write"); + pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space; dm_pool_metadata_read_write(pool->pmd); pool->process_bio = process_bio; pool->process_discard = process_discard_bio; @@ -4249,10 +4250,9 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits) { struct thin_c *tc = ti->private; struct pool *pool = tc->pool; - struct queue_limits *pool_limits = dm_get_queue_limits(pool->pool_md); - if (!pool_limits->discard_granularity) - return; /* pool's discard support is disabled */ + if (!pool->pf.discard_enabled) + return; limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */ diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 6e15f3565892..5df40480228b 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -591,7 +591,7 @@ retry: out: dm_put_live_table(md, *srcu_idx); - if (r == 
-ENOTCONN) { + if (r == -ENOTCONN && !fatal_signal_pending(current)) { msleep(10); goto retry; } @@ -603,9 +603,10 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, { struct mapped_device *md = bdev->bd_disk->private_data; struct dm_target *tgt; + struct block_device *tgt_bdev = NULL; int srcu_idx, r; - r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx); + r = dm_get_live_table_for_ioctl(md, &tgt, &tgt_bdev, &mode, &srcu_idx); if (r < 0) return r; @@ -620,7 +621,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, goto out; } - r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); + r = __blkdev_driver_ioctl(tgt_bdev, mode, cmd, arg); out: dm_put_live_table(md, srcu_idx); return r; diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c index 57dadd52b428..1deb8ff90a89 100644 --- a/drivers/net/can/bfin_can.c +++ b/drivers/net/can/bfin_can.c @@ -501,8 +501,6 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status) cf->data[2] |= CAN_ERR_PROT_FORM; else if (status & SER) cf->data[2] |= CAN_ERR_PROT_STUFF; - else - cf->data[2] |= CAN_ERR_PROT_UNSPEC; } priv->can.state = state; diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index 5d214d135332..f91b094288da 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -962,7 +962,6 @@ static int c_can_handle_bus_err(struct net_device *dev, * type of the last error to occur on the CAN bus */ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - cf->data[2] |= CAN_ERR_PROT_UNSPEC; switch (lec_type) { case LEC_STUFF_ERROR: @@ -975,8 +974,7 @@ static int c_can_handle_bus_err(struct net_device *dev, break; case LEC_ACK_ERROR: netdev_dbg(dev, "ack error\n"); - cf->data[3] |= (CAN_ERR_PROT_LOC_ACK | - CAN_ERR_PROT_LOC_ACK_DEL); + cf->data[3] = CAN_ERR_PROT_LOC_ACK; break; case LEC_BIT1_ERROR: netdev_dbg(dev, "bit1 error\n"); @@ -988,8 +986,7 @@ static int c_can_handle_bus_err(struct net_device *dev, break; case LEC_CRC_ERROR: netdev_dbg(dev, "CRC error\n"); - cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ | - CAN_ERR_PROT_LOC_CRC_DEL); + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; break; default: break; diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c index 70a8cbb29e75..1e37313054f3 100644 --- a/drivers/net/can/cc770/cc770.c +++ b/drivers/net/can/cc770/cc770.c @@ -578,7 +578,7 @@ static int cc770_err(struct net_device *dev, u8 status) cf->data[2] |= CAN_ERR_PROT_BIT0; break; case STAT_LEC_CRC: - cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; break; } } diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 868fe945e35a..41c0fc9f3b14 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -535,13 +535,13 @@ static void do_bus_err(struct net_device *dev, if (reg_esr & FLEXCAN_ESR_ACK_ERR) { netdev_dbg(dev, "ACK_ERR irq\n"); cf->can_id |= CAN_ERR_ACK; - cf->data[3] |= CAN_ERR_PROT_LOC_ACK; + cf->data[3] = CAN_ERR_PROT_LOC_ACK; tx_errors = 1; } if (reg_esr & FLEXCAN_ESR_CRC_ERR) { netdev_dbg(dev, "CRC_ERR irq\n"); cf->data[2] |= CAN_ERR_PROT_BIT; - cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; rx_errors = 1; } if (reg_esr & FLEXCAN_ESR_FRM_ERR) { diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index c1e85368a198..5d04f5464faf 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -1096,7 +1096,6 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct 
ican3_msg *msg) cf->data[2] |= CAN_ERR_PROT_STUFF; break; default: - cf->data[2] |= CAN_ERR_PROT_UNSPEC; cf->data[3] = ecc & ECC_SEG; break; } diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index ef655177bb5e..39cf911f7a1e 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -487,7 +487,6 @@ static int m_can_handle_lec_err(struct net_device *dev, * type of the last error to occur on the CAN bus */ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - cf->data[2] |= CAN_ERR_PROT_UNSPEC; switch (lec_type) { case LEC_STUFF_ERROR: @@ -500,8 +499,7 @@ static int m_can_handle_lec_err(struct net_device *dev, break; case LEC_ACK_ERROR: netdev_dbg(dev, "ack error\n"); - cf->data[3] |= (CAN_ERR_PROT_LOC_ACK | - CAN_ERR_PROT_LOC_ACK_DEL); + cf->data[3] = CAN_ERR_PROT_LOC_ACK; break; case LEC_BIT1_ERROR: netdev_dbg(dev, "bit1 error\n"); @@ -513,8 +511,7 @@ static int m_can_handle_lec_err(struct net_device *dev, break; case LEC_CRC_ERROR: netdev_dbg(dev, "CRC error\n"); - cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ | - CAN_ERR_PROT_LOC_CRC_DEL); + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; break; default: break; diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c index e187ca783da0..c1317889d3d8 100644 --- a/drivers/net/can/pch_can.c +++ b/drivers/net/can/pch_can.c @@ -559,8 +559,7 @@ static void pch_can_error(struct net_device *ndev, u32 status) stats->rx_errors++; break; case PCH_CRC_ERR: - cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ | - CAN_ERR_PROT_LOC_CRC_DEL; + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; priv->can.can_stats.bus_error++; stats->rx_errors++; break; diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c index 7bd54191f962..bc46be39549d 100644 --- a/drivers/net/can/rcar_can.c +++ b/drivers/net/can/rcar_can.c @@ -241,17 +241,16 @@ static void rcar_can_error(struct net_device *ndev) u8 ecsr; netdev_dbg(priv->ndev, "Bus error interrupt:\n"); - if (skb) { + if (skb) cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; - cf->data[2] = CAN_ERR_PROT_UNSPEC; - } + ecsr = readb(&priv->regs->ecsr); if (ecsr & RCAR_CAN_ECSR_ADEF) { netdev_dbg(priv->ndev, "ACK Delimiter Error\n"); tx_errors++; writeb(~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr); if (skb) - cf->data[3] |= CAN_ERR_PROT_LOC_ACK_DEL; + cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL; } if (ecsr & RCAR_CAN_ECSR_BE0F) { netdev_dbg(priv->ndev, "Bit Error (dominant)\n"); @@ -272,7 +271,7 @@ static void rcar_can_error(struct net_device *ndev) rx_errors++; writeb(~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr); if (skb) - cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; } if (ecsr & RCAR_CAN_ECSR_AEF) { netdev_dbg(priv->ndev, "ACK Error\n"); @@ -280,7 +279,7 @@ static void rcar_can_error(struct net_device *ndev) writeb(~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr); if (skb) { cf->can_id |= CAN_ERR_ACK; - cf->data[3] |= CAN_ERR_PROT_LOC_ACK; + cf->data[3] = CAN_ERR_PROT_LOC_ACK; } } if (ecsr & RCAR_CAN_ECSR_FEF) { diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 7b92e911a616..8dda3b703d39 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -218,6 +218,9 @@ static void sja1000_start(struct net_device *dev) priv->write_reg(priv, SJA1000_RXERR, 0x0); priv->read_reg(priv, SJA1000_ECC); + /* clear interrupt flags */ + priv->read_reg(priv, SJA1000_IR); + /* leave reset mode */ set_normal_mode(dev); } @@ -446,7 +449,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) 
cf->data[2] |= CAN_ERR_PROT_STUFF; break; default: - cf->data[2] |= CAN_ERR_PROT_UNSPEC; cf->data[3] = ecc & ECC_SEG; break; } diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c index d9a42c646783..68ef0a4cd821 100644 --- a/drivers/net/can/sun4i_can.c +++ b/drivers/net/can/sun4i_can.c @@ -575,7 +575,6 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status) cf->data[2] |= CAN_ERR_PROT_STUFF; break; default: - cf->data[2] |= CAN_ERR_PROT_UNSPEC; cf->data[3] = (ecc & SUN4I_STA_ERR_SEG_CODE) >> 16; break; diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index cf345cbfe819..680d1ff07a55 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -722,7 +722,6 @@ static int ti_hecc_error(struct net_device *ndev, int int_status, if (err_status & HECC_BUS_ERROR) { ++priv->can.can_stats.bus_error; cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; - cf->data[2] |= CAN_ERR_PROT_UNSPEC; if (err_status & HECC_CANES_FE) { hecc_set_bit(priv, HECC_CANES, HECC_CANES_FE); cf->data[2] |= CAN_ERR_PROT_FORM; @@ -737,13 +736,11 @@ static int ti_hecc_error(struct net_device *ndev, int int_status, } if (err_status & HECC_CANES_CRCE) { hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE); - cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ | - CAN_ERR_PROT_LOC_CRC_DEL; + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; } if (err_status & HECC_CANES_ACKE) { hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE); - cf->data[3] |= CAN_ERR_PROT_LOC_ACK | - CAN_ERR_PROT_LOC_ACK_DEL; + cf->data[3] = CAN_ERR_PROT_LOC_ACK; } } diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 2d390384ef3b..fc5b75675cd8 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -377,7 +377,6 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) cf->data[2] |= CAN_ERR_PROT_STUFF; break; default: - cf->data[2] |= CAN_ERR_PROT_UNSPEC; cf->data[3] = ecc & SJA1000_ECC_SEG; break; } diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index 0e5a4493ba4f..113e64fcd73b 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -282,7 +282,6 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv, cf->data[2] |= CAN_ERR_PROT_STUFF; break; default: - cf->data[2] |= CAN_ERR_PROT_UNSPEC; cf->data[3] = ecc & SJA1000_ECC_SEG; break; } diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 8b17a9065b0b..022bfa13ebfa 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -944,10 +944,9 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; if (es->leaf.error_factor & M16C_EF_ACKE) - cf->data[3] |= (CAN_ERR_PROT_LOC_ACK); + cf->data[3] = CAN_ERR_PROT_LOC_ACK; if (es->leaf.error_factor & M16C_EF_CRCE) - cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ | - CAN_ERR_PROT_LOC_CRC_DEL); + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; if (es->leaf.error_factor & M16C_EF_FORME) cf->data[2] |= CAN_ERR_PROT_FORM; if (es->leaf.error_factor & M16C_EF_STFE) diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index de95b1ccba3e..a731720f1d13 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -401,9 +401,7 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv, tx_errors = 1; break; case USB_8DEV_STATUSMSG_CRC: - cf->data[2] |= CAN_ERR_PROT_UNSPEC; - cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ | - CAN_ERR_PROT_LOC_CRC_DEL; + cf->data[3] = 
CAN_ERR_PROT_LOC_CRC_SEQ; rx_errors = 1; break; case USB_8DEV_STATUSMSG_BIT0: diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index fc55e8e0351d..51670b322409 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -608,17 +608,15 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) /* Check for error interrupt */ if (isr & XCAN_IXR_ERROR_MASK) { - if (skb) { + if (skb) cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - cf->data[2] |= CAN_ERR_PROT_UNSPEC; - } /* Check for Ack error interrupt */ if (err_status & XCAN_ESR_ACKER_MASK) { stats->tx_errors++; if (skb) { cf->can_id |= CAN_ERR_ACK; - cf->data[3] |= CAN_ERR_PROT_LOC_ACK; + cf->data[3] = CAN_ERR_PROT_LOC_ACK; } } @@ -654,8 +652,7 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) stats->rx_errors++; if (skb) { cf->can_id |= CAN_ERR_PROT; - cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ | - CAN_ERR_PROT_LOC_CRC_DEL; + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; } } priv->can.can_stats.bus_error++; diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 955d06b9cdba..31c5e476fd64 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -29,6 +29,7 @@ source "drivers/net/ethernet/apm/Kconfig" source "drivers/net/ethernet/apple/Kconfig" source "drivers/net/ethernet/arc/Kconfig" source "drivers/net/ethernet/atheros/Kconfig" +source "drivers/net/ethernet/aurora/Kconfig" source "drivers/net/ethernet/cadence/Kconfig" source "drivers/net/ethernet/adi/Kconfig" source "drivers/net/ethernet/broadcom/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 4a2ee98738f0..071f84eb6f3f 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_NET_XGENE) += apm/ obj-$(CONFIG_NET_VENDOR_APPLE) += apple/ obj-$(CONFIG_NET_VENDOR_ARC) += arc/ obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/ +obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/ obj-$(CONFIG_NET_CADENCE) += cadence/ obj-$(CONFIG_NET_BFIN) += adi/ obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/ diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 991412ce6f48..9147a0107c44 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -450,12 +450,12 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } - pdata->ring_ops->wr_cmd(tx_ring, count); skb_tx_timestamp(skb); pdata->stats.tx_packets++; pdata->stats.tx_bytes += skb->len; + pdata->ring_ops->wr_cmd(tx_ring, count); return NETDEV_TX_OK; } @@ -688,10 +688,10 @@ static int xgene_enet_open(struct net_device *ndev) mac_ops->tx_enable(pdata); mac_ops->rx_enable(pdata); + xgene_enet_napi_enable(pdata); ret = xgene_enet_register_irq(ndev); if (ret) return ret; - xgene_enet_napi_enable(pdata); if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) phy_start(pdata->phy_dev); @@ -715,13 +715,13 @@ static int xgene_enet_close(struct net_device *ndev) else cancel_delayed_work_sync(&pdata->link_work); - xgene_enet_napi_disable(pdata); - xgene_enet_free_irq(ndev); - xgene_enet_process_ring(pdata->rx_ring, -1); - mac_ops->tx_disable(pdata); mac_ops->rx_disable(pdata); + xgene_enet_free_irq(ndev); + xgene_enet_napi_disable(pdata); + xgene_enet_process_ring(pdata->rx_ring, -1); + return 0; } @@ -1474,15 +1474,15 @@ static int xgene_enet_probe(struct platform_device *pdev) } ndev->hw_features = ndev->features; - ret = 
register_netdev(ndev); + ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64)); if (ret) { - netdev_err(ndev, "Failed to register netdev\n"); + netdev_err(ndev, "No usable DMA configuration\n"); goto err; } - ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64)); + ret = register_netdev(ndev); if (ret) { - netdev_err(ndev, "No usable DMA configuration\n"); + netdev_err(ndev, "Failed to register netdev\n"); goto err; } @@ -1490,14 +1490,17 @@ static int xgene_enet_probe(struct platform_device *pdev) if (ret) goto err; - xgene_enet_napi_add(pdata); mac_ops = pdata->mac_ops; - if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) + if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) { ret = xgene_enet_mdio_config(pdata); - else + if (ret) + goto err; + } else { INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state); + } - return ret; + xgene_enet_napi_add(pdata); + return 0; err: unregister_netdev(ndev); free_netdev(ndev); diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index c8af3ce3ea38..bd377a6b067d 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1534,6 +1534,8 @@ static const struct pci_device_id alx_pci_tbl[] = { .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200), .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, + { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400), + .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162), .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) }, diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h index af006b44b2a6..0959e6824cb6 100644 --- a/drivers/net/ethernet/atheros/alx/reg.h +++ b/drivers/net/ethernet/atheros/alx/reg.h @@ -37,6 +37,7 @@ #define ALX_DEV_ID_AR8161 0x1091 #define ALX_DEV_ID_E2200 0xe091 +#define ALX_DEV_ID_E2400 0xe0a1 #define ALX_DEV_ID_AR8162 0x1090 #define ALX_DEV_ID_AR8171 0x10A1 #define ALX_DEV_ID_AR8172 0x10A0 diff --git a/drivers/net/ethernet/aurora/Kconfig b/drivers/net/ethernet/aurora/Kconfig new file mode 100644 index 000000000000..a3c7106fdf85 --- /dev/null +++ b/drivers/net/ethernet/aurora/Kconfig @@ -0,0 +1,20 @@ +config NET_VENDOR_AURORA + bool "Aurora VLSI devices" + help + If you have a network (Ethernet) device belonging to this class, + say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + questions about Aurora devices. If you say Y, you will be asked + for your specific device in the following questions. + +if NET_VENDOR_AURORA + +config AURORA_NB8800 + tristate "Aurora AU-NB8800 support" + select PHYLIB + help + Support for the AU-NB8800 gigabit Ethernet controller. + +endif diff --git a/drivers/net/ethernet/aurora/Makefile b/drivers/net/ethernet/aurora/Makefile new file mode 100644 index 000000000000..6cb528a2fc26 --- /dev/null +++ b/drivers/net/ethernet/aurora/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_AURORA_NB8800) += nb8800.o diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c new file mode 100644 index 000000000000..ecc4a334c507 --- /dev/null +++ b/drivers/net/ethernet/aurora/nb8800.c @@ -0,0 +1,1552 @@ +/* + * Copyright (C) 2015 Mans Rullgard <mans@mansr.com> + * + * Mostly rewritten, based on driver from Sigma Designs. Original + * copyright notice below. 
+ * + * + * Driver for tangox SMP864x/SMP865x/SMP867x/SMP868x builtin Ethernet Mac. + * + * Copyright (C) 2005 Maxime Bizon <mbizon@freebox.fr> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/module.h> +#include <linux/etherdevice.h> +#include <linux/delay.h> +#include <linux/ethtool.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/of_device.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/dma-mapping.h> +#include <linux/phy.h> +#include <linux/cache.h> +#include <linux/jiffies.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <asm/barrier.h> + +#include "nb8800.h" + +static void nb8800_tx_done(struct net_device *dev); +static int nb8800_dma_stop(struct net_device *dev); + +static inline u8 nb8800_readb(struct nb8800_priv *priv, int reg) +{ + return readb_relaxed(priv->base + reg); +} + +static inline u32 nb8800_readl(struct nb8800_priv *priv, int reg) +{ + return readl_relaxed(priv->base + reg); +} + +static inline void nb8800_writeb(struct nb8800_priv *priv, int reg, u8 val) +{ + writeb_relaxed(val, priv->base + reg); +} + +static inline void nb8800_writew(struct nb8800_priv *priv, int reg, u16 val) +{ + writew_relaxed(val, priv->base + reg); +} + +static inline void nb8800_writel(struct nb8800_priv *priv, int reg, u32 val) +{ + writel_relaxed(val, priv->base + reg); +} + +static inline void nb8800_maskb(struct nb8800_priv *priv, int reg, + u32 mask, u32 val) +{ + u32 old = nb8800_readb(priv, reg); + u32 new = (old & ~mask) | (val & mask); + + if (new != old) + nb8800_writeb(priv, reg, new); +} + +static inline void nb8800_maskl(struct nb8800_priv *priv, int reg, + u32 mask, u32 val) +{ + u32 old = nb8800_readl(priv, reg); + u32 new = (old & ~mask) | (val & mask); + + if (new != old) + nb8800_writel(priv, reg, new); +} + +static inline void nb8800_modb(struct nb8800_priv *priv, int reg, u8 bits, + bool set) +{ + nb8800_maskb(priv, reg, bits, set ? bits : 0); +} + +static inline void nb8800_setb(struct nb8800_priv *priv, int reg, u8 bits) +{ + nb8800_maskb(priv, reg, bits, bits); +} + +static inline void nb8800_clearb(struct nb8800_priv *priv, int reg, u8 bits) +{ + nb8800_maskb(priv, reg, bits, 0); +} + +static inline void nb8800_modl(struct nb8800_priv *priv, int reg, u32 bits, + bool set) +{ + nb8800_maskl(priv, reg, bits, set ? 
bits : 0); +} + +static inline void nb8800_setl(struct nb8800_priv *priv, int reg, u32 bits) +{ + nb8800_maskl(priv, reg, bits, bits); +} + +static inline void nb8800_clearl(struct nb8800_priv *priv, int reg, u32 bits) +{ + nb8800_maskl(priv, reg, bits, 0); +} + +static int nb8800_mdio_wait(struct mii_bus *bus) +{ + struct nb8800_priv *priv = bus->priv; + u32 val; + + return readl_poll_timeout_atomic(priv->base + NB8800_MDIO_CMD, + val, !(val & MDIO_CMD_GO), 1, 1000); +} + +static int nb8800_mdio_cmd(struct mii_bus *bus, u32 cmd) +{ + struct nb8800_priv *priv = bus->priv; + int err; + + err = nb8800_mdio_wait(bus); + if (err) + return err; + + nb8800_writel(priv, NB8800_MDIO_CMD, cmd); + udelay(10); + nb8800_writel(priv, NB8800_MDIO_CMD, cmd | MDIO_CMD_GO); + + return nb8800_mdio_wait(bus); +} + +static int nb8800_mdio_read(struct mii_bus *bus, int phy_id, int reg) +{ + struct nb8800_priv *priv = bus->priv; + u32 val; + int err; + + err = nb8800_mdio_cmd(bus, MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg)); + if (err) + return err; + + val = nb8800_readl(priv, NB8800_MDIO_STS); + if (val & MDIO_STS_ERR) + return 0xffff; + + return val & 0xffff; +} + +static int nb8800_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val) +{ + u32 cmd = MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg) | + MDIO_CMD_DATA(val) | MDIO_CMD_WR; + + return nb8800_mdio_cmd(bus, cmd); +} + +static void nb8800_mac_tx(struct net_device *dev, bool enable) +{ + struct nb8800_priv *priv = netdev_priv(dev); + + while (nb8800_readl(priv, NB8800_TXC_CR) & TCR_EN) + cpu_relax(); + + nb8800_modb(priv, NB8800_TX_CTL1, TX_EN, enable); +} + +static void nb8800_mac_rx(struct net_device *dev, bool enable) +{ + nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_EN, enable); +} + +static void nb8800_mac_af(struct net_device *dev, bool enable) +{ + nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_AF_EN, enable); +} + +static void nb8800_start_rx(struct net_device *dev) +{ + nb8800_setl(netdev_priv(dev), NB8800_RXC_CR, RCR_EN); +} + +static int nb8800_alloc_rx(struct net_device *dev, unsigned int i, bool napi) +{ + struct nb8800_priv *priv = netdev_priv(dev); + struct nb8800_rx_desc *rxd = &priv->rx_descs[i]; + struct nb8800_rx_buf *rxb = &priv->rx_bufs[i]; + int size = L1_CACHE_ALIGN(RX_BUF_SIZE); + dma_addr_t dma_addr; + struct page *page; + unsigned long offset; + void *data; + + data = napi ? napi_alloc_frag(size) : netdev_alloc_frag(size); + if (!data) + return -ENOMEM; + + page = virt_to_head_page(data); + offset = data - page_address(page); + + dma_addr = dma_map_page(&dev->dev, page, offset, RX_BUF_SIZE, + DMA_FROM_DEVICE); + + if (dma_mapping_error(&dev->dev, dma_addr)) { + skb_free_frag(data); + return -ENOMEM; + } + + rxb->page = page; + rxb->offset = offset; + rxd->desc.s_addr = dma_addr; + + return 0; +} + +static void nb8800_receive(struct net_device *dev, unsigned int i, + unsigned int len) +{ + struct nb8800_priv *priv = netdev_priv(dev); + struct nb8800_rx_desc *rxd = &priv->rx_descs[i]; + struct page *page = priv->rx_bufs[i].page; + int offset = priv->rx_bufs[i].offset; + void *data = page_address(page) + offset; + dma_addr_t dma = rxd->desc.s_addr; + struct sk_buff *skb; + unsigned int size; + int err; + + size = len <= RX_COPYBREAK ? 
len : RX_COPYHDR; + + skb = napi_alloc_skb(&priv->napi, size); + if (!skb) { + netdev_err(dev, "rx skb allocation failed\n"); + dev->stats.rx_dropped++; + return; + } + + if (len <= RX_COPYBREAK) { + dma_sync_single_for_cpu(&dev->dev, dma, len, DMA_FROM_DEVICE); + memcpy(skb_put(skb, len), data, len); + dma_sync_single_for_device(&dev->dev, dma, len, + DMA_FROM_DEVICE); + } else { + err = nb8800_alloc_rx(dev, i, true); + if (err) { + netdev_err(dev, "rx buffer allocation failed\n"); + dev->stats.rx_dropped++; + return; + } + + dma_unmap_page(&dev->dev, dma, RX_BUF_SIZE, DMA_FROM_DEVICE); + memcpy(skb_put(skb, RX_COPYHDR), data, RX_COPYHDR); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + offset + RX_COPYHDR, len - RX_COPYHDR, + RX_BUF_SIZE); + } + + skb->protocol = eth_type_trans(skb, dev); + napi_gro_receive(&priv->napi, skb); +} + +static void nb8800_rx_error(struct net_device *dev, u32 report) +{ + if (report & RX_LENGTH_ERR) + dev->stats.rx_length_errors++; + + if (report & RX_FCS_ERR) + dev->stats.rx_crc_errors++; + + if (report & RX_FIFO_OVERRUN) + dev->stats.rx_fifo_errors++; + + if (report & RX_ALIGNMENT_ERROR) + dev->stats.rx_frame_errors++; + + dev->stats.rx_errors++; +} + +static int nb8800_poll(struct napi_struct *napi, int budget) +{ + struct net_device *dev = napi->dev; + struct nb8800_priv *priv = netdev_priv(dev); + struct nb8800_rx_desc *rxd; + unsigned int last = priv->rx_eoc; + unsigned int next; + int work = 0; + + nb8800_tx_done(dev); + +again: + while (work < budget) { + struct nb8800_rx_buf *rxb; + unsigned int len; + + next = (last + 1) % RX_DESC_COUNT; + + rxb = &priv->rx_bufs[next]; + rxd = &priv->rx_descs[next]; + + if (!rxd->report) + break; + + len = RX_BYTES_TRANSFERRED(rxd->report); + + if (IS_RX_ERROR(rxd->report)) + nb8800_rx_error(dev, rxd->report); + else + nb8800_receive(dev, next, len); + + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; + + if (rxd->report & RX_MULTICAST_PKT) + dev->stats.multicast++; + + rxd->report = 0; + last = next; + work++; + } + + if (work) { + priv->rx_descs[last].desc.config |= DESC_EOC; + wmb(); /* ensure new EOC is written before clearing old */ + priv->rx_descs[priv->rx_eoc].desc.config &= ~DESC_EOC; + priv->rx_eoc = last; + nb8800_start_rx(dev); + } + + if (work < budget) { + nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq); + + /* If a packet arrived after we last checked but + * before writing RX_ITR, the interrupt will be + * delayed, so we retrieve it now. 
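+ * Re-checking the descriptor ring here, before napi_complete_done(),
+ * is what closes that window: a frame that slipped in gets handled on
+ * another polling pass instead of waiting for the next interrupt.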
+ */ + if (priv->rx_descs[next].report) + goto again; + + napi_complete_done(napi, work); + } + + return work; +} + +static void __nb8800_tx_dma_start(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + struct nb8800_tx_buf *txb; + u32 txc_cr; + + txb = &priv->tx_bufs[priv->tx_queue]; + if (!txb->ready) + return; + + txc_cr = nb8800_readl(priv, NB8800_TXC_CR); + if (txc_cr & TCR_EN) + return; + + nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc); + wmb(); /* ensure desc addr is written before starting DMA */ + nb8800_writel(priv, NB8800_TXC_CR, txc_cr | TCR_EN); + + priv->tx_queue = (priv->tx_queue + txb->chain_len) % TX_DESC_COUNT; +} + +static void nb8800_tx_dma_start(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + + spin_lock_irq(&priv->tx_lock); + __nb8800_tx_dma_start(dev); + spin_unlock_irq(&priv->tx_lock); +} + +static void nb8800_tx_dma_start_irq(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + + spin_lock(&priv->tx_lock); + __nb8800_tx_dma_start(dev); + spin_unlock(&priv->tx_lock); +} + +static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + struct nb8800_tx_desc *txd; + struct nb8800_tx_buf *txb; + struct nb8800_dma_desc *desc; + dma_addr_t dma_addr; + unsigned int dma_len; + unsigned int align; + unsigned int next; + + if (atomic_read(&priv->tx_free) <= NB8800_DESC_LOW) { + netif_stop_queue(dev); + return NETDEV_TX_BUSY; + } + + align = (8 - (uintptr_t)skb->data) & 7; + + dma_len = skb->len - align; + dma_addr = dma_map_single(&dev->dev, skb->data + align, + dma_len, DMA_TO_DEVICE); + + if (dma_mapping_error(&dev->dev, dma_addr)) { + netdev_err(dev, "tx dma mapping error\n"); + kfree_skb(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + if (atomic_dec_return(&priv->tx_free) <= NB8800_DESC_LOW) { + netif_stop_queue(dev); + skb->xmit_more = 0; + } + + next = priv->tx_next; + txb = &priv->tx_bufs[next]; + txd = &priv->tx_descs[next]; + desc = &txd->desc[0]; + + next = (next + 1) % TX_DESC_COUNT; + + if (align) { + memcpy(txd->buf, skb->data, align); + + desc->s_addr = + txb->dma_desc + offsetof(struct nb8800_tx_desc, buf); + desc->n_addr = txb->dma_desc + sizeof(txd->desc[0]); + desc->config = DESC_BTS(2) | DESC_DS | align; + + desc++; + } + + desc->s_addr = dma_addr; + desc->n_addr = priv->tx_bufs[next].dma_desc; + desc->config = DESC_BTS(2) | DESC_DS | DESC_EOF | dma_len; + + if (!skb->xmit_more) + desc->config |= DESC_EOC; + + txb->skb = skb; + txb->dma_addr = dma_addr; + txb->dma_len = dma_len; + + if (!priv->tx_chain) { + txb->chain_len = 1; + priv->tx_chain = txb; + } else { + priv->tx_chain->chain_len++; + } + + netdev_sent_queue(dev, skb->len); + + priv->tx_next = next; + + if (!skb->xmit_more) { + smp_wmb(); + priv->tx_chain->ready = true; + priv->tx_chain = NULL; + nb8800_tx_dma_start(dev); + } + + return NETDEV_TX_OK; +} + +static void nb8800_tx_error(struct net_device *dev, u32 report) +{ + if (report & TX_LATE_COLLISION) + dev->stats.collisions++; + + if (report & TX_PACKET_DROPPED) + dev->stats.tx_dropped++; + + if (report & TX_FIFO_UNDERRUN) + dev->stats.tx_fifo_errors++; + + dev->stats.tx_errors++; +} + +static void nb8800_tx_done(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + unsigned int limit = priv->tx_next; + unsigned int done = priv->tx_done; + unsigned int packets = 0; + unsigned int len = 0; + + while (done != limit) { + struct nb8800_tx_desc *txd = 
&priv->tx_descs[done]; + struct nb8800_tx_buf *txb = &priv->tx_bufs[done]; + struct sk_buff *skb; + + if (!txd->report) + break; + + skb = txb->skb; + len += skb->len; + + dma_unmap_single(&dev->dev, txb->dma_addr, txb->dma_len, + DMA_TO_DEVICE); + + if (IS_TX_ERROR(txd->report)) { + nb8800_tx_error(dev, txd->report); + kfree_skb(skb); + } else { + consume_skb(skb); + } + + dev->stats.tx_packets++; + dev->stats.tx_bytes += TX_BYTES_TRANSFERRED(txd->report); + dev->stats.collisions += TX_EARLY_COLLISIONS(txd->report); + + txb->skb = NULL; + txb->ready = false; + txd->report = 0; + + done = (done + 1) % TX_DESC_COUNT; + packets++; + } + + if (packets) { + smp_mb__before_atomic(); + atomic_add(packets, &priv->tx_free); + netdev_completed_queue(dev, packets, len); + netif_wake_queue(dev); + priv->tx_done = done; + } +} + +static irqreturn_t nb8800_irq(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct nb8800_priv *priv = netdev_priv(dev); + irqreturn_t ret = IRQ_NONE; + u32 val; + + /* tx interrupt */ + val = nb8800_readl(priv, NB8800_TXC_SR); + if (val) { + nb8800_writel(priv, NB8800_TXC_SR, val); + + if (val & TSR_DI) + nb8800_tx_dma_start_irq(dev); + + if (val & TSR_TI) + napi_schedule_irqoff(&priv->napi); + + if (unlikely(val & TSR_DE)) + netdev_err(dev, "TX DMA error\n"); + + /* should never happen with automatic status retrieval */ + if (unlikely(val & TSR_TO)) + netdev_err(dev, "TX Status FIFO overflow\n"); + + ret = IRQ_HANDLED; + } + + /* rx interrupt */ + val = nb8800_readl(priv, NB8800_RXC_SR); + if (val) { + nb8800_writel(priv, NB8800_RXC_SR, val); + + if (likely(val & (RSR_RI | RSR_DI))) { + nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_poll); + napi_schedule_irqoff(&priv->napi); + } + + if (unlikely(val & RSR_DE)) + netdev_err(dev, "RX DMA error\n"); + + /* should never happen with automatic status retrieval */ + if (unlikely(val & RSR_RO)) + netdev_err(dev, "RX Status FIFO overflow\n"); + + ret = IRQ_HANDLED; + } + + return ret; +} + +static void nb8800_mac_config(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + bool gigabit = priv->speed == SPEED_1000; + u32 mac_mode_mask = RGMII_MODE | HALF_DUPLEX | GMAC_MODE; + u32 mac_mode = 0; + u32 slot_time; + u32 phy_clk; + u32 ict; + + if (!priv->duplex) + mac_mode |= HALF_DUPLEX; + + if (gigabit) { + if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII) + mac_mode |= RGMII_MODE; + + mac_mode |= GMAC_MODE; + phy_clk = 125000000; + + /* Should be 512 but register is only 8 bits */ + slot_time = 255; + } else { + phy_clk = 25000000; + slot_time = 128; + } + + ict = DIV_ROUND_UP(phy_clk, clk_get_rate(priv->clk)); + + nb8800_writeb(priv, NB8800_IC_THRESHOLD, ict); + nb8800_writeb(priv, NB8800_SLOT_TIME, slot_time); + nb8800_maskb(priv, NB8800_MAC_MODE, mac_mode_mask, mac_mode); +} + +static void nb8800_pause_config(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; + u32 rxcr; + + if (priv->pause_aneg) { + if (!phydev || !phydev->link) + return; + + priv->pause_rx = phydev->pause; + priv->pause_tx = phydev->pause ^ phydev->asym_pause; + } + + nb8800_modb(priv, NB8800_RX_CTL, RX_PAUSE_EN, priv->pause_rx); + + rxcr = nb8800_readl(priv, NB8800_RXC_CR); + if (!!(rxcr & RCR_FL) == priv->pause_tx) + return; + + if (netif_running(dev)) { + napi_disable(&priv->napi); + netif_tx_lock_bh(dev); + nb8800_dma_stop(dev); + nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx); + nb8800_start_rx(dev); + netif_tx_unlock_bh(dev); + 
napi_enable(&priv->napi); + } else { + nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx); + } +} + +static void nb8800_link_reconfigure(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; + int change = 0; + + if (phydev->link) { + if (phydev->speed != priv->speed) { + priv->speed = phydev->speed; + change = 1; + } + + if (phydev->duplex != priv->duplex) { + priv->duplex = phydev->duplex; + change = 1; + } + + if (change) + nb8800_mac_config(dev); + + nb8800_pause_config(dev); + } + + if (phydev->link != priv->link) { + priv->link = phydev->link; + change = 1; + } + + if (change) + phy_print_status(priv->phydev); +} + +static void nb8800_update_mac_addr(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + int i; + + for (i = 0; i < ETH_ALEN; i++) + nb8800_writeb(priv, NB8800_SRC_ADDR(i), dev->dev_addr[i]); + + for (i = 0; i < ETH_ALEN; i++) + nb8800_writeb(priv, NB8800_UC_ADDR(i), dev->dev_addr[i]); +} + +static int nb8800_set_mac_address(struct net_device *dev, void *addr) +{ + struct sockaddr *sock = addr; + + if (netif_running(dev)) + return -EBUSY; + + ether_addr_copy(dev->dev_addr, sock->sa_data); + nb8800_update_mac_addr(dev); + + return 0; +} + +static void nb8800_mc_init(struct net_device *dev, int val) +{ + struct nb8800_priv *priv = netdev_priv(dev); + + nb8800_writeb(priv, NB8800_MC_INIT, val); + readb_poll_timeout_atomic(priv->base + NB8800_MC_INIT, val, !val, + 1, 1000); +} + +static void nb8800_set_rx_mode(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + struct netdev_hw_addr *ha; + int i; + + if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { + nb8800_mac_af(dev, false); + return; + } + + nb8800_mac_af(dev, true); + nb8800_mc_init(dev, 0); + + netdev_for_each_mc_addr(ha, dev) { + for (i = 0; i < ETH_ALEN; i++) + nb8800_writeb(priv, NB8800_MC_ADDR(i), ha->addr[i]); + + nb8800_mc_init(dev, 0xff); + } +} + +#define RX_DESC_SIZE (RX_DESC_COUNT * sizeof(struct nb8800_rx_desc)) +#define TX_DESC_SIZE (TX_DESC_COUNT * sizeof(struct nb8800_tx_desc)) + +static void nb8800_dma_free(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + unsigned int i; + + if (priv->rx_bufs) { + for (i = 0; i < RX_DESC_COUNT; i++) + if (priv->rx_bufs[i].page) + put_page(priv->rx_bufs[i].page); + + kfree(priv->rx_bufs); + priv->rx_bufs = NULL; + } + + if (priv->tx_bufs) { + for (i = 0; i < TX_DESC_COUNT; i++) + kfree_skb(priv->tx_bufs[i].skb); + + kfree(priv->tx_bufs); + priv->tx_bufs = NULL; + } + + if (priv->rx_descs) { + dma_free_coherent(dev->dev.parent, RX_DESC_SIZE, priv->rx_descs, + priv->rx_desc_dma); + priv->rx_descs = NULL; + } + + if (priv->tx_descs) { + dma_free_coherent(dev->dev.parent, TX_DESC_SIZE, priv->tx_descs, + priv->tx_desc_dma); + priv->tx_descs = NULL; + } +} + +static void nb8800_dma_reset(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + struct nb8800_rx_desc *rxd; + struct nb8800_tx_desc *txd; + unsigned int i; + + for (i = 0; i < RX_DESC_COUNT; i++) { + dma_addr_t rx_dma = priv->rx_desc_dma + i * sizeof(*rxd); + + rxd = &priv->rx_descs[i]; + rxd->desc.n_addr = rx_dma + sizeof(*rxd); + rxd->desc.r_addr = + rx_dma + offsetof(struct nb8800_rx_desc, report); + rxd->desc.config = priv->rx_dma_config; + rxd->report = 0; + } + + rxd->desc.n_addr = priv->rx_desc_dma; + rxd->desc.config |= DESC_EOC; + + priv->rx_eoc = RX_DESC_COUNT - 1; + + for (i = 0; i < TX_DESC_COUNT; i++) { + struct nb8800_tx_buf *txb = 
&priv->tx_bufs[i]; + dma_addr_t r_dma = txb->dma_desc + + offsetof(struct nb8800_tx_desc, report); + + txd = &priv->tx_descs[i]; + txd->desc[0].r_addr = r_dma; + txd->desc[1].r_addr = r_dma; + txd->report = 0; + } + + priv->tx_next = 0; + priv->tx_queue = 0; + priv->tx_done = 0; + atomic_set(&priv->tx_free, TX_DESC_COUNT); + + nb8800_writel(priv, NB8800_RX_DESC_ADDR, priv->rx_desc_dma); + + wmb(); /* ensure all setup is written before starting */ +} + +static int nb8800_dma_init(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + unsigned int n_rx = RX_DESC_COUNT; + unsigned int n_tx = TX_DESC_COUNT; + unsigned int i; + int err; + + priv->rx_descs = dma_alloc_coherent(dev->dev.parent, RX_DESC_SIZE, + &priv->rx_desc_dma, GFP_KERNEL); + if (!priv->rx_descs) + goto err_out; + + priv->rx_bufs = kcalloc(n_rx, sizeof(*priv->rx_bufs), GFP_KERNEL); + if (!priv->rx_bufs) + goto err_out; + + for (i = 0; i < n_rx; i++) { + err = nb8800_alloc_rx(dev, i, false); + if (err) + goto err_out; + } + + priv->tx_descs = dma_alloc_coherent(dev->dev.parent, TX_DESC_SIZE, + &priv->tx_desc_dma, GFP_KERNEL); + if (!priv->tx_descs) + goto err_out; + + priv->tx_bufs = kcalloc(n_tx, sizeof(*priv->tx_bufs), GFP_KERNEL); + if (!priv->tx_bufs) + goto err_out; + + for (i = 0; i < n_tx; i++) + priv->tx_bufs[i].dma_desc = + priv->tx_desc_dma + i * sizeof(struct nb8800_tx_desc); + + nb8800_dma_reset(dev); + + return 0; + +err_out: + nb8800_dma_free(dev); + + return -ENOMEM; +} + +static int nb8800_dma_stop(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + struct nb8800_tx_buf *txb = &priv->tx_bufs[0]; + struct nb8800_tx_desc *txd = &priv->tx_descs[0]; + int retry = 5; + u32 txcr; + u32 rxcr; + int err; + unsigned int i; + + /* wait for tx to finish */ + err = readl_poll_timeout_atomic(priv->base + NB8800_TXC_CR, txcr, + !(txcr & TCR_EN) && + priv->tx_done == priv->tx_next, + 1000, 1000000); + if (err) + return err; + + /* The rx DMA only stops if it reaches the end of chain. + * To make this happen, we set the EOC flag on all rx + * descriptors, put the device in loopback mode, and send + * a few dummy frames. The interrupt handler will ignore + * these since NAPI is disabled and no real frames are in + * the tx queue. + */ + + for (i = 0; i < RX_DESC_COUNT; i++) + priv->rx_descs[i].desc.config |= DESC_EOC; + + txd->desc[0].s_addr = + txb->dma_desc + offsetof(struct nb8800_tx_desc, buf); + txd->desc[0].config = DESC_BTS(2) | DESC_DS | DESC_EOF | DESC_EOC | 8; + memset(txd->buf, 0, sizeof(txd->buf)); + + nb8800_mac_af(dev, false); + nb8800_setb(priv, NB8800_MAC_MODE, LOOPBACK_EN); + + do { + nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc); + wmb(); + nb8800_writel(priv, NB8800_TXC_CR, txcr | TCR_EN); + + err = readl_poll_timeout_atomic(priv->base + NB8800_RXC_CR, + rxcr, !(rxcr & RCR_EN), + 1000, 100000); + } while (err && --retry); + + nb8800_mac_af(dev, true); + nb8800_clearb(priv, NB8800_MAC_MODE, LOOPBACK_EN); + nb8800_dma_reset(dev); + + return retry ? 
0 : -ETIMEDOUT; +} + +static void nb8800_pause_adv(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + u32 adv = 0; + + if (!priv->phydev) + return; + + if (priv->pause_rx) + adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; + if (priv->pause_tx) + adv ^= ADVERTISED_Asym_Pause; + + priv->phydev->supported |= adv; + priv->phydev->advertising |= adv; +} + +static int nb8800_open(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + int err; + + /* clear any pending interrupts */ + nb8800_writel(priv, NB8800_RXC_SR, 0xf); + nb8800_writel(priv, NB8800_TXC_SR, 0xf); + + err = nb8800_dma_init(dev); + if (err) + return err; + + err = request_irq(dev->irq, nb8800_irq, 0, dev_name(&dev->dev), dev); + if (err) + goto err_free_dma; + + nb8800_mac_rx(dev, true); + nb8800_mac_tx(dev, true); + + priv->phydev = of_phy_connect(dev, priv->phy_node, + nb8800_link_reconfigure, 0, + priv->phy_mode); + if (!priv->phydev) + goto err_free_irq; + + nb8800_pause_adv(dev); + + netdev_reset_queue(dev); + napi_enable(&priv->napi); + netif_start_queue(dev); + + nb8800_start_rx(dev); + phy_start(priv->phydev); + + return 0; + +err_free_irq: + free_irq(dev->irq, dev); +err_free_dma: + nb8800_dma_free(dev); + + return err; +} + +static int nb8800_stop(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + + phy_stop(priv->phydev); + + netif_stop_queue(dev); + napi_disable(&priv->napi); + + nb8800_dma_stop(dev); + nb8800_mac_rx(dev, false); + nb8800_mac_tx(dev, false); + + phy_disconnect(priv->phydev); + priv->phydev = NULL; + + free_irq(dev->irq, dev); + + nb8800_dma_free(dev); + + return 0; +} + +static int nb8800_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct nb8800_priv *priv = netdev_priv(dev); + + return phy_mii_ioctl(priv->phydev, rq, cmd); +} + +static const struct net_device_ops nb8800_netdev_ops = { + .ndo_open = nb8800_open, + .ndo_stop = nb8800_stop, + .ndo_start_xmit = nb8800_xmit, + .ndo_set_mac_address = nb8800_set_mac_address, + .ndo_set_rx_mode = nb8800_set_rx_mode, + .ndo_do_ioctl = nb8800_ioctl, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, +}; + +static int nb8800_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct nb8800_priv *priv = netdev_priv(dev); + + if (!priv->phydev) + return -ENODEV; + + return phy_ethtool_gset(priv->phydev, cmd); +} + +static int nb8800_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct nb8800_priv *priv = netdev_priv(dev); + + if (!priv->phydev) + return -ENODEV; + + return phy_ethtool_sset(priv->phydev, cmd); +} + +static int nb8800_nway_reset(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + + if (!priv->phydev) + return -ENODEV; + + return genphy_restart_aneg(priv->phydev); +} + +static void nb8800_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pp) +{ + struct nb8800_priv *priv = netdev_priv(dev); + + pp->autoneg = priv->pause_aneg; + pp->rx_pause = priv->pause_rx; + pp->tx_pause = priv->pause_tx; +} + +static int nb8800_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pp) +{ + struct nb8800_priv *priv = netdev_priv(dev); + + priv->pause_aneg = pp->autoneg; + priv->pause_rx = pp->rx_pause; + priv->pause_tx = pp->tx_pause; + + nb8800_pause_adv(dev); + + if (!priv->pause_aneg) + nb8800_pause_config(dev); + else if (priv->phydev) + phy_start_aneg(priv->phydev); + + return 0; +} + +static const char nb8800_stats_names[][ETH_GSTRING_LEN] = { + 
"rx_bytes_ok", + "rx_frames_ok", + "rx_undersize_frames", + "rx_fragment_frames", + "rx_64_byte_frames", + "rx_127_byte_frames", + "rx_255_byte_frames", + "rx_511_byte_frames", + "rx_1023_byte_frames", + "rx_max_size_frames", + "rx_oversize_frames", + "rx_bad_fcs_frames", + "rx_broadcast_frames", + "rx_multicast_frames", + "rx_control_frames", + "rx_pause_frames", + "rx_unsup_control_frames", + "rx_align_error_frames", + "rx_overrun_frames", + "rx_jabber_frames", + "rx_bytes", + "rx_frames", + + "tx_bytes_ok", + "tx_frames_ok", + "tx_64_byte_frames", + "tx_127_byte_frames", + "tx_255_byte_frames", + "tx_511_byte_frames", + "tx_1023_byte_frames", + "tx_max_size_frames", + "tx_oversize_frames", + "tx_broadcast_frames", + "tx_multicast_frames", + "tx_control_frames", + "tx_pause_frames", + "tx_underrun_frames", + "tx_single_collision_frames", + "tx_multi_collision_frames", + "tx_deferred_collision_frames", + "tx_late_collision_frames", + "tx_excessive_collision_frames", + "tx_bytes", + "tx_frames", + "tx_collisions", +}; + +#define NB8800_NUM_STATS ARRAY_SIZE(nb8800_stats_names) + +static int nb8800_get_sset_count(struct net_device *dev, int sset) +{ + if (sset == ETH_SS_STATS) + return NB8800_NUM_STATS; + + return -EOPNOTSUPP; +} + +static void nb8800_get_strings(struct net_device *dev, u32 sset, u8 *buf) +{ + if (sset == ETH_SS_STATS) + memcpy(buf, &nb8800_stats_names, sizeof(nb8800_stats_names)); +} + +static u32 nb8800_read_stat(struct net_device *dev, int index) +{ + struct nb8800_priv *priv = netdev_priv(dev); + + nb8800_writeb(priv, NB8800_STAT_INDEX, index); + + return nb8800_readl(priv, NB8800_STAT_DATA); +} + +static void nb8800_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *estats, u64 *st) +{ + unsigned int i; + u32 rx, tx; + + for (i = 0; i < NB8800_NUM_STATS / 2; i++) { + rx = nb8800_read_stat(dev, i); + tx = nb8800_read_stat(dev, i | 0x80); + st[i] = rx; + st[i + NB8800_NUM_STATS / 2] = tx; + } +} + +static const struct ethtool_ops nb8800_ethtool_ops = { + .get_settings = nb8800_get_settings, + .set_settings = nb8800_set_settings, + .nway_reset = nb8800_nway_reset, + .get_link = ethtool_op_get_link, + .get_pauseparam = nb8800_get_pauseparam, + .set_pauseparam = nb8800_set_pauseparam, + .get_sset_count = nb8800_get_sset_count, + .get_strings = nb8800_get_strings, + .get_ethtool_stats = nb8800_get_ethtool_stats, +}; + +static int nb8800_hw_init(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + u32 val; + + val = TX_RETRY_EN | TX_PAD_EN | TX_APPEND_FCS; + nb8800_writeb(priv, NB8800_TX_CTL1, val); + + /* Collision retry count */ + nb8800_writeb(priv, NB8800_TX_CTL2, 5); + + val = RX_PAD_STRIP | RX_AF_EN; + nb8800_writeb(priv, NB8800_RX_CTL, val); + + /* Chosen by fair dice roll */ + nb8800_writeb(priv, NB8800_RANDOM_SEED, 4); + + /* TX cycles per deferral period */ + nb8800_writeb(priv, NB8800_TX_SDP, 12); + + /* The following three threshold values have been + * experimentally determined for good results. 
+ */ + + /* RX/TX FIFO threshold for partial empty (64-bit entries) */ + nb8800_writeb(priv, NB8800_PE_THRESHOLD, 0); + + /* RX/TX FIFO threshold for partial full (64-bit entries) */ + nb8800_writeb(priv, NB8800_PF_THRESHOLD, 255); + + /* Buffer size for transmit (64-bit entries) */ + nb8800_writeb(priv, NB8800_TX_BUFSIZE, 64); + + /* Configure tx DMA */ + + val = nb8800_readl(priv, NB8800_TXC_CR); + val &= TCR_LE; /* keep endian setting */ + val |= TCR_DM; /* DMA descriptor mode */ + val |= TCR_RS; /* automatically store tx status */ + val |= TCR_DIE; /* interrupt on DMA chain completion */ + val |= TCR_TFI(7); /* interrupt after 7 frames transmitted */ + val |= TCR_BTS(2); /* 32-byte bus transaction size */ + nb8800_writel(priv, NB8800_TXC_CR, val); + + /* TX complete interrupt after 10 ms or 7 frames (see above) */ + val = clk_get_rate(priv->clk) / 100; + nb8800_writel(priv, NB8800_TX_ITR, val); + + /* Configure rx DMA */ + + val = nb8800_readl(priv, NB8800_RXC_CR); + val &= RCR_LE; /* keep endian setting */ + val |= RCR_DM; /* DMA descriptor mode */ + val |= RCR_RS; /* automatically store rx status */ + val |= RCR_DIE; /* interrupt at end of DMA chain */ + val |= RCR_RFI(7); /* interrupt after 7 frames received */ + val |= RCR_BTS(2); /* 32-byte bus transaction size */ + nb8800_writel(priv, NB8800_RXC_CR, val); + + /* The rx interrupt can fire before the DMA has completed + * unless a small delay is added. 50 us is hopefully enough. + */ + priv->rx_itr_irq = clk_get_rate(priv->clk) / 20000; + + /* In NAPI poll mode we want to disable interrupts, but the + * hardware does not permit this. Delay 10 ms instead. + */ + priv->rx_itr_poll = clk_get_rate(priv->clk) / 100; + + nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq); + + priv->rx_dma_config = RX_BUF_SIZE | DESC_BTS(2) | DESC_DS | DESC_EOF; + + /* Flow control settings */ + + /* Pause time of 0.1 ms */ + val = 100000 / 512; + nb8800_writeb(priv, NB8800_PQ1, val >> 8); + nb8800_writeb(priv, NB8800_PQ2, val & 0xff); + + /* Auto-negotiate by default */ + priv->pause_aneg = true; + priv->pause_rx = true; + priv->pause_tx = true; + + nb8800_mc_init(dev, 0); + + return 0; +} + +static int nb8800_tangox_init(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + u32 pad_mode = PAD_MODE_MII; + + switch (priv->phy_mode) { + case PHY_INTERFACE_MODE_MII: + case PHY_INTERFACE_MODE_GMII: + pad_mode = PAD_MODE_MII; + break; + + case PHY_INTERFACE_MODE_RGMII: + pad_mode = PAD_MODE_RGMII; + break; + + case PHY_INTERFACE_MODE_RGMII_TXID: + pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY; + break; + + default: + dev_err(dev->dev.parent, "unsupported phy mode %s\n", + phy_modes(priv->phy_mode)); + return -EINVAL; + } + + nb8800_writeb(priv, NB8800_TANGOX_PAD_MODE, pad_mode); + + return 0; +} + +static int nb8800_tangox_reset(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + int clk_div; + + nb8800_writeb(priv, NB8800_TANGOX_RESET, 0); + usleep_range(1000, 10000); + nb8800_writeb(priv, NB8800_TANGOX_RESET, 1); + + wmb(); /* ensure reset is cleared before proceeding */ + + clk_div = DIV_ROUND_UP(clk_get_rate(priv->clk), 2 * MAX_MDC_CLOCK); + nb8800_writew(priv, NB8800_TANGOX_MDIO_CLKDIV, clk_div); + + return 0; +} + +static const struct nb8800_ops nb8800_tangox_ops = { + .init = nb8800_tangox_init, + .reset = nb8800_tangox_reset, +}; + +static int nb8800_tango4_init(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + int err; + + err = nb8800_tangox_init(dev); + if (err) + return 
err; + + /* On tango4 interrupt on DMA completion per frame works and gives + * better performance despite generating more rx interrupts. + */ + + /* Disable unnecessary interrupt on rx completion */ + nb8800_clearl(priv, NB8800_RXC_CR, RCR_RFI(7)); + + /* Request interrupt on descriptor DMA completion */ + priv->rx_dma_config |= DESC_ID; + + return 0; +} + +static const struct nb8800_ops nb8800_tango4_ops = { + .init = nb8800_tango4_init, + .reset = nb8800_tangox_reset, +}; + +static const struct of_device_id nb8800_dt_ids[] = { + { + .compatible = "aurora,nb8800", + }, + { + .compatible = "sigma,smp8642-ethernet", + .data = &nb8800_tangox_ops, + }, + { + .compatible = "sigma,smp8734-ethernet", + .data = &nb8800_tango4_ops, + }, + { } +}; + +static int nb8800_probe(struct platform_device *pdev) +{ + const struct of_device_id *match; + const struct nb8800_ops *ops = NULL; + struct nb8800_priv *priv; + struct resource *res; + struct net_device *dev; + struct mii_bus *bus; + const unsigned char *mac; + void __iomem *base; + int irq; + int ret; + + match = of_match_device(nb8800_dt_ids, &pdev->dev); + if (match) + ops = match->data; + + irq = platform_get_irq(pdev, 0); + if (irq <= 0) { + dev_err(&pdev->dev, "No IRQ\n"); + return -EINVAL; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + dev_dbg(&pdev->dev, "AU-NB8800 Ethernet at %pa\n", &res->start); + + dev = alloc_etherdev(sizeof(*priv)); + if (!dev) + return -ENOMEM; + + platform_set_drvdata(pdev, dev); + SET_NETDEV_DEV(dev, &pdev->dev); + + priv = netdev_priv(dev); + priv->base = base; + + priv->phy_mode = of_get_phy_mode(pdev->dev.of_node); + if (priv->phy_mode < 0) + priv->phy_mode = PHY_INTERFACE_MODE_RGMII; + + priv->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) { + dev_err(&pdev->dev, "failed to get clock\n"); + ret = PTR_ERR(priv->clk); + goto err_free_dev; + } + + ret = clk_prepare_enable(priv->clk); + if (ret) + goto err_free_dev; + + spin_lock_init(&priv->tx_lock); + + if (ops && ops->reset) { + ret = ops->reset(dev); + if (ret) + goto err_free_dev; + } + + bus = devm_mdiobus_alloc(&pdev->dev); + if (!bus) { + ret = -ENOMEM; + goto err_disable_clk; + } + + bus->name = "nb8800-mii"; + bus->read = nb8800_mdio_read; + bus->write = nb8800_mdio_write; + bus->parent = &pdev->dev; + snprintf(bus->id, MII_BUS_ID_SIZE, "%lx.nb8800-mii", + (unsigned long)res->start); + bus->priv = priv; + + ret = of_mdiobus_register(bus, pdev->dev.of_node); + if (ret) { + dev_err(&pdev->dev, "failed to register MII bus\n"); + goto err_disable_clk; + } + + priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); + if (!priv->phy_node) { + dev_err(&pdev->dev, "no PHY specified\n"); + ret = -ENODEV; + goto err_free_bus; + } + + priv->mii_bus = bus; + + ret = nb8800_hw_init(dev); + if (ret) + goto err_free_bus; + + if (ops && ops->init) { + ret = ops->init(dev); + if (ret) + goto err_free_bus; + } + + dev->netdev_ops = &nb8800_netdev_ops; + dev->ethtool_ops = &nb8800_ethtool_ops; + dev->flags |= IFF_MULTICAST; + dev->irq = irq; + + mac = of_get_mac_address(pdev->dev.of_node); + if (mac) + ether_addr_copy(dev->dev_addr, mac); + + if (!is_valid_ether_addr(dev->dev_addr)) + eth_hw_addr_random(dev); + + nb8800_update_mac_addr(dev); + + netif_carrier_off(dev); + + ret = register_netdev(dev); + if (ret) { + netdev_err(dev, "failed to register netdev\n"); + goto err_free_dma; + } + + netif_napi_add(dev, &priv->napi, nb8800_poll, 
NAPI_POLL_WEIGHT); + + netdev_info(dev, "MAC address %pM\n", dev->dev_addr); + + return 0; + +err_free_dma: + nb8800_dma_free(dev); +err_free_bus: + mdiobus_unregister(bus); +err_disable_clk: + clk_disable_unprepare(priv->clk); +err_free_dev: + free_netdev(dev); + + return ret; +} + +static int nb8800_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct nb8800_priv *priv = netdev_priv(ndev); + + unregister_netdev(ndev); + + mdiobus_unregister(priv->mii_bus); + + clk_disable_unprepare(priv->clk); + + nb8800_dma_free(ndev); + free_netdev(ndev); + + return 0; +} + +static struct platform_driver nb8800_driver = { + .driver = { + .name = "nb8800", + .of_match_table = nb8800_dt_ids, + }, + .probe = nb8800_probe, + .remove = nb8800_remove, +}; + +module_platform_driver(nb8800_driver); + +MODULE_DESCRIPTION("Aurora AU-NB8800 Ethernet driver"); +MODULE_AUTHOR("Mans Rullgard <mans@mansr.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/aurora/nb8800.h b/drivers/net/ethernet/aurora/nb8800.h new file mode 100644 index 000000000000..e5adbc2aac9f --- /dev/null +++ b/drivers/net/ethernet/aurora/nb8800.h @@ -0,0 +1,316 @@ +#ifndef _NB8800_H_ +#define _NB8800_H_ + +#include <linux/types.h> +#include <linux/skbuff.h> +#include <linux/phy.h> +#include <linux/clk.h> +#include <linux/bitops.h> + +#define RX_DESC_COUNT 256 +#define TX_DESC_COUNT 256 + +#define NB8800_DESC_LOW 4 + +#define RX_BUF_SIZE 1552 + +#define RX_COPYBREAK 256 +#define RX_COPYHDR 128 + +#define MAX_MDC_CLOCK 2500000 + +/* Stargate Solutions SSN8800 core registers */ +#define NB8800_TX_CTL1 0x000 +#define TX_TPD BIT(5) +#define TX_APPEND_FCS BIT(4) +#define TX_PAD_EN BIT(3) +#define TX_RETRY_EN BIT(2) +#define TX_EN BIT(0) + +#define NB8800_TX_CTL2 0x001 + +#define NB8800_RX_CTL 0x004 +#define RX_BC_DISABLE BIT(7) +#define RX_RUNT BIT(6) +#define RX_AF_EN BIT(5) +#define RX_PAUSE_EN BIT(3) +#define RX_SEND_CRC BIT(2) +#define RX_PAD_STRIP BIT(1) +#define RX_EN BIT(0) + +#define NB8800_RANDOM_SEED 0x008 +#define NB8800_TX_SDP 0x14 +#define NB8800_TX_TPDP1 0x18 +#define NB8800_TX_TPDP2 0x19 +#define NB8800_SLOT_TIME 0x1c + +#define NB8800_MDIO_CMD 0x020 +#define MDIO_CMD_GO BIT(31) +#define MDIO_CMD_WR BIT(26) +#define MDIO_CMD_ADDR(x) ((x) << 21) +#define MDIO_CMD_REG(x) ((x) << 16) +#define MDIO_CMD_DATA(x) ((x) << 0) + +#define NB8800_MDIO_STS 0x024 +#define MDIO_STS_ERR BIT(31) + +#define NB8800_MC_ADDR(i) (0x028 + (i)) +#define NB8800_MC_INIT 0x02e +#define NB8800_UC_ADDR(i) (0x03c + (i)) + +#define NB8800_MAC_MODE 0x044 +#define RGMII_MODE BIT(7) +#define HALF_DUPLEX BIT(4) +#define BURST_EN BIT(3) +#define LOOPBACK_EN BIT(2) +#define GMAC_MODE BIT(0) + +#define NB8800_IC_THRESHOLD 0x050 +#define NB8800_PE_THRESHOLD 0x051 +#define NB8800_PF_THRESHOLD 0x052 +#define NB8800_TX_BUFSIZE 0x054 +#define NB8800_FIFO_CTL 0x056 +#define NB8800_PQ1 0x060 +#define NB8800_PQ2 0x061 +#define NB8800_SRC_ADDR(i) (0x06a + (i)) +#define NB8800_STAT_DATA 0x078 +#define NB8800_STAT_INDEX 0x07c +#define NB8800_STAT_CLEAR 0x07d + +#define NB8800_SLEEP_MODE 0x07e +#define SLEEP_MODE BIT(0) + +#define NB8800_WAKEUP 0x07f +#define WAKEUP BIT(0) + +/* Aurora NB8800 host interface registers */ +#define NB8800_TXC_CR 0x100 +#define TCR_LK BIT(12) +#define TCR_DS BIT(11) +#define TCR_BTS(x) (((x) & 0x7) << 8) +#define TCR_DIE BIT(7) +#define TCR_TFI(x) (((x) & 0x7) << 4) +#define TCR_LE BIT(3) +#define TCR_RS BIT(2) +#define TCR_DM BIT(1) +#define TCR_EN BIT(0) + +#define NB8800_TXC_SR 0x104 
+#define TSR_DE BIT(3) +#define TSR_DI BIT(2) +#define TSR_TO BIT(1) +#define TSR_TI BIT(0) + +#define NB8800_TX_SAR 0x108 +#define NB8800_TX_DESC_ADDR 0x10c + +#define NB8800_TX_REPORT_ADDR 0x110 +#define TX_BYTES_TRANSFERRED(x) (((x) >> 16) & 0xffff) +#define TX_FIRST_DEFERRAL BIT(7) +#define TX_EARLY_COLLISIONS(x) (((x) >> 3) & 0xf) +#define TX_LATE_COLLISION BIT(2) +#define TX_PACKET_DROPPED BIT(1) +#define TX_FIFO_UNDERRUN BIT(0) +#define IS_TX_ERROR(r) ((r) & 0x07) + +#define NB8800_TX_FIFO_SR 0x114 +#define NB8800_TX_ITR 0x118 + +#define NB8800_RXC_CR 0x200 +#define RCR_FL BIT(13) +#define RCR_LK BIT(12) +#define RCR_DS BIT(11) +#define RCR_BTS(x) (((x) & 7) << 8) +#define RCR_DIE BIT(7) +#define RCR_RFI(x) (((x) & 7) << 4) +#define RCR_LE BIT(3) +#define RCR_RS BIT(2) +#define RCR_DM BIT(1) +#define RCR_EN BIT(0) + +#define NB8800_RXC_SR 0x204 +#define RSR_DE BIT(3) +#define RSR_DI BIT(2) +#define RSR_RO BIT(1) +#define RSR_RI BIT(0) + +#define NB8800_RX_SAR 0x208 +#define NB8800_RX_DESC_ADDR 0x20c + +#define NB8800_RX_REPORT_ADDR 0x210 +#define RX_BYTES_TRANSFERRED(x) (((x) >> 16) & 0xFFFF) +#define RX_MULTICAST_PKT BIT(9) +#define RX_BROADCAST_PKT BIT(8) +#define RX_LENGTH_ERR BIT(7) +#define RX_FCS_ERR BIT(6) +#define RX_RUNT_PKT BIT(5) +#define RX_FIFO_OVERRUN BIT(4) +#define RX_LATE_COLLISION BIT(3) +#define RX_ALIGNMENT_ERROR BIT(2) +#define RX_ERROR_MASK 0xfc +#define IS_RX_ERROR(r) ((r) & RX_ERROR_MASK) + +#define NB8800_RX_FIFO_SR 0x214 +#define NB8800_RX_ITR 0x218 + +/* Sigma Designs SMP86xx additional registers */ +#define NB8800_TANGOX_PAD_MODE 0x400 +#define PAD_MODE_MASK 0x7 +#define PAD_MODE_MII 0x0 +#define PAD_MODE_RGMII 0x1 +#define PAD_MODE_GTX_CLK_INV BIT(3) +#define PAD_MODE_GTX_CLK_DELAY BIT(4) + +#define NB8800_TANGOX_MDIO_CLKDIV 0x420 +#define NB8800_TANGOX_RESET 0x424 + +/* Hardware DMA descriptor */ +struct nb8800_dma_desc { + u32 s_addr; /* start address */ + u32 n_addr; /* next descriptor address */ + u32 r_addr; /* report address */ + u32 config; +} __aligned(8); + +#define DESC_ID BIT(23) +#define DESC_EOC BIT(22) +#define DESC_EOF BIT(21) +#define DESC_LK BIT(20) +#define DESC_DS BIT(19) +#define DESC_BTS(x) (((x) & 0x7) << 16) + +/* DMA descriptor and associated data for rx. + * Allocated from coherent memory. + */ +struct nb8800_rx_desc { + /* DMA descriptor */ + struct nb8800_dma_desc desc; + + /* Status report filled in by hardware */ + u32 report; +}; + +/* Address of buffer on rx ring */ +struct nb8800_rx_buf { + struct page *page; + unsigned long offset; +}; + +/* DMA descriptors and associated data for tx. + * Allocated from coherent memory. + */ +struct nb8800_tx_desc { + /* DMA descriptor. The second descriptor is used if packet + * data is unaligned. + */ + struct nb8800_dma_desc desc[2]; + + /* Status report filled in by hardware */ + u32 report; + + /* Bounce buffer for initial unaligned part of packet */ + u8 buf[8] __aligned(8); +}; + +/* Packet in tx queue */ +struct nb8800_tx_buf { + /* Currently queued skb */ + struct sk_buff *skb; + + /* DMA address of the first descriptor */ + dma_addr_t dma_desc; + + /* DMA address of packet data */ + dma_addr_t dma_addr; + + /* Length of DMA mapping, less than skb->len if alignment + * buffer is used. 
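+	 * (nb8800_xmit() copies up to seven leading bytes into the
+	 * descriptor's bounce buffer so that the mapped address is
+	 * always 8-byte aligned.)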
+ */ + unsigned int dma_len; + + /* Number of packets in chain starting here */ + unsigned int chain_len; + + /* Packet chain ready to be submitted to hardware */ + bool ready; +}; + +struct nb8800_priv { + struct napi_struct napi; + + void __iomem *base; + + /* RX DMA descriptors */ + struct nb8800_rx_desc *rx_descs; + + /* RX buffers referenced by DMA descriptors */ + struct nb8800_rx_buf *rx_bufs; + + /* Current end of chain */ + u32 rx_eoc; + + /* Value for rx interrupt time register in NAPI interrupt mode */ + u32 rx_itr_irq; + + /* Value for rx interrupt time register in NAPI poll mode */ + u32 rx_itr_poll; + + /* Value for config field of rx DMA descriptors */ + u32 rx_dma_config; + + /* TX DMA descriptors */ + struct nb8800_tx_desc *tx_descs; + + /* TX packet queue */ + struct nb8800_tx_buf *tx_bufs; + + /* Number of free tx queue entries */ + atomic_t tx_free; + + /* First free tx queue entry */ + u32 tx_next; + + /* Next buffer to transmit */ + u32 tx_queue; + + /* Start of current packet chain */ + struct nb8800_tx_buf *tx_chain; + + /* Next buffer to reclaim */ + u32 tx_done; + + /* Lock for DMA activation */ + spinlock_t tx_lock; + + struct mii_bus *mii_bus; + struct device_node *phy_node; + struct phy_device *phydev; + + /* PHY connection type from DT */ + int phy_mode; + + /* Current link status */ + int speed; + int duplex; + int link; + + /* Pause settings */ + bool pause_aneg; + bool pause_rx; + bool pause_tx; + + /* DMA base address of rx descriptors, see rx_descs above */ + dma_addr_t rx_desc_dma; + + /* DMA base address of tx descriptors, see tx_descs above */ + dma_addr_t tx_desc_dma; + + struct clk *clk; +}; + +struct nb8800_ops { + int (*init)(struct net_device *dev); + int (*reset)(struct net_device *dev); +}; + +#endif /* _NB8800_H_ */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index c9b036789184..2e611dc5f162 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -10139,8 +10139,8 @@ static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port) DP(BNX2X_MSG_SP, "Invalid vxlan port\n"); return; } - bp->vxlan_dst_port--; - if (bp->vxlan_dst_port) + bp->vxlan_dst_port_count--; + if (bp->vxlan_dst_port_count) return; if (netif_running(bp->dev)) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index db15c5ee09c5..bdf094fb6ef9 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3625,6 +3625,7 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) pf->fw_fid = le16_to_cpu(resp->fid); pf->port_id = le16_to_cpu(resp->port_id); memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN); + memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN); pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); @@ -3648,8 +3649,11 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) vf->fw_fid = le16_to_cpu(resp->fid); memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN); - if (!is_valid_ether_addr(vf->mac_addr)) - random_ether_addr(vf->mac_addr); + if (is_valid_ether_addr(vf->mac_addr)) + /* overwrite netdev dev_adr with admin VF MAC */ + memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); + else + random_ether_addr(bp->dev->dev_addr); vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); @@ 
-3880,6 +3884,8 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp) #endif } +static int bnxt_cfg_rx_mode(struct bnxt *); + static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) { int rc = 0; @@ -3946,11 +3952,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) bp->vnic_info[0].rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; - rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); - if (rc) { - netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", rc); + rc = bnxt_cfg_rx_mode(bp); + if (rc) goto err_out; - } rc = bnxt_hwrm_set_coal(bp); if (rc) @@ -4865,7 +4869,7 @@ static void bnxt_set_rx_mode(struct net_device *dev) } } -static void bnxt_cfg_rx_mode(struct bnxt *bp) +static int bnxt_cfg_rx_mode(struct bnxt *bp) { struct net_device *dev = bp->dev; struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; @@ -4914,6 +4918,7 @@ static void bnxt_cfg_rx_mode(struct bnxt *bp) netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); vnic->uc_filter_count = i; + return rc; } } @@ -4922,6 +4927,8 @@ skip_uc: if (rc) netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", rc); + + return rc; } static netdev_features_t bnxt_fix_features(struct net_device *dev, @@ -5212,13 +5219,27 @@ init_err: static int bnxt_change_mac_addr(struct net_device *dev, void *p) { struct sockaddr *addr = p; + struct bnxt *bp = netdev_priv(dev); + int rc = 0; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; +#ifdef CONFIG_BNXT_SRIOV + if (BNXT_VF(bp) && is_valid_ether_addr(bp->vf.mac_addr)) + return -EADDRNOTAVAIL; +#endif + + if (ether_addr_equal(addr->sa_data, dev->dev_addr)) + return 0; + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + if (netif_running(dev)) { + bnxt_close_nic(bp, false, false); + rc = bnxt_open_nic(bp, false, false); + } - return 0; + return rc; } /* rtnl_lock held */ @@ -5686,15 +5707,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bnxt_set_tpa_flags(bp); bnxt_set_ring_params(bp); dflt_rings = netif_get_num_default_rss_queues(); - if (BNXT_PF(bp)) { - memcpy(dev->dev_addr, bp->pf.mac_addr, ETH_ALEN); + if (BNXT_PF(bp)) bp->pf.max_irqs = max_irqs; - } else { #if defined(CONFIG_BNXT_SRIOV) - memcpy(dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); + else bp->vf.max_irqs = max_irqs; #endif - } bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index f4cf68861069..7a9af2887d8e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -804,10 +804,9 @@ void bnxt_update_vf_mac(struct bnxt *bp) if (!is_valid_ether_addr(resp->perm_mac_address)) goto update_vf_mac_exit; - if (ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr)) - goto update_vf_mac_exit; - - memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN); + if (!ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr)) + memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN); + /* overwrite netdev dev_adr with admin VF MAC */ memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); update_vf_mac_exit: mutex_unlock(&bp->hwrm_cmd_lock); diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 88c1e1a834f8..169059c92f80 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -1682,6 +1682,8 @@ static void 
macb_init_hw(struct macb *bp) macb_set_hwaddr(bp); config = macb_mdc_clk_div(bp); + if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) + config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */ config |= MACB_BIT(PAE); /* PAuse Enable */ config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ @@ -2416,6 +2418,8 @@ static int macb_init(struct platform_device *pdev) /* Set MII management clock divider */ val = macb_mdc_clk_div(bp); val |= macb_dbw(bp); + if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) + val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); macb_writel(bp, NCFGR, val); return 0; diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 6e1faea00ca8..d83b0db77821 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -215,12 +215,17 @@ /* GEM specific NCFGR bitfields. */ #define GEM_GBE_OFFSET 10 /* Gigabit mode enable */ #define GEM_GBE_SIZE 1 +#define GEM_PCSSEL_OFFSET 11 +#define GEM_PCSSEL_SIZE 1 #define GEM_CLK_OFFSET 18 /* MDC clock division */ #define GEM_CLK_SIZE 3 #define GEM_DBW_OFFSET 21 /* Data bus width */ #define GEM_DBW_SIZE 2 #define GEM_RXCOEN_OFFSET 24 #define GEM_RXCOEN_SIZE 1 +#define GEM_SGMIIEN_OFFSET 27 +#define GEM_SGMIIEN_SIZE 1 + /* Constants for data bus width. */ #define GEM_DBW32 0 /* 32 bit AMBA AHB data bus width */ diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index d3950b20feb9..39ca6744a4e6 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -120,10 +120,9 @@ * Calculated for SCLK of 700Mhz * value written should be a 1/16th of what is expected * - * 1 tick per 0.05usec = value of 2.2 - * This 10% would be covered in CQ timer thresh value + * 1 tick per 0.025usec */ -#define NICPF_CLK_PER_INT_TICK 2 +#define NICPF_CLK_PER_INT_TICK 1 /* Time to wait before we decide that a SQ is stuck. 
* diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index c561fdcb79a7..4b7fd63ae57c 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -37,6 +37,7 @@ struct nicpf { #define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF) #define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF) u8 vf_lmac_map[MAX_LMAC]; + u8 lmac_cnt; struct delayed_work dwork; struct workqueue_struct *check_link; u8 link[MAX_LMAC]; @@ -279,6 +280,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic) u64 lmac_credit; nic->num_vf_en = 0; + nic->lmac_cnt = 0; for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) { if (!(bgx_map & (1 << bgx))) @@ -288,6 +290,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic) nic->vf_lmac_map[next_bgx_lmac++] = NIC_SET_VF_LMAC_MAP(bgx, lmac); nic->num_vf_en += lmac_cnt; + nic->lmac_cnt += lmac_cnt; /* Program LMAC credits */ lmac_credit = (1ull << 1); /* channel credit enable */ @@ -715,6 +718,13 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) case NIC_MBOX_MSG_CFG_DONE: /* Last message of VF config msg sequence */ nic->vf_enabled[vf] = true; + if (vf >= nic->lmac_cnt) + goto unlock; + + bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + + bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, true); goto unlock; case NIC_MBOX_MSG_SHUTDOWN: /* First msg in VF teardown sequence */ @@ -722,6 +732,14 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) if (vf >= nic->num_vf_en) nic->sqs_used[vf - nic->num_vf_en] = false; nic->pqs_vf[vf] = 0; + + if (vf >= nic->lmac_cnt) + break; + + bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + + bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, false); break; case NIC_MBOX_MSG_ALLOC_SQS: nic_alloc_sqs(nic, &mbx.sqs_alloc); @@ -940,7 +958,7 @@ static void nic_poll_for_link(struct work_struct *work) mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; - for (vf = 0; vf < nic->num_vf_en; vf++) { + for (vf = 0; vf < nic->lmac_cnt; vf++) { /* Poll only if VF is UP */ if (!nic->vf_enabled[vf]) continue; @@ -1074,8 +1092,7 @@ static void nic_remove(struct pci_dev *pdev) if (nic->check_link) { /* Destroy work Queue */ - cancel_delayed_work(&nic->dwork); - flush_workqueue(nic->check_link); + cancel_delayed_work_sync(&nic->dwork); destroy_workqueue(nic->check_link); } diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index af54c10945c2..a12b2e38cf61 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -112,6 +112,13 @@ static int nicvf_get_settings(struct net_device *netdev, cmd->supported = 0; cmd->transceiver = XCVR_EXTERNAL; + + if (!nic->link_up) { + cmd->duplex = DUPLEX_UNKNOWN; + ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + return 0; + } + if (nic->speed <= 1000) { cmd->port = PORT_MII; cmd->autoneg = AUTONEG_ENABLE; @@ -125,6 +132,13 @@ static int nicvf_get_settings(struct net_device *netdev, return 0; } +static u32 nicvf_get_link(struct net_device *netdev) +{ + struct nicvf *nic = netdev_priv(netdev); + + return nic->link_up; +} + static void nicvf_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { @@ -660,7 +674,7 @@ static int nicvf_set_channels(struct net_device *dev, static const struct ethtool_ops nicvf_ethtool_ops = { 
.get_settings = nicvf_get_settings, - .get_link = ethtool_op_get_link, + .get_link = nicvf_get_link, .get_drvinfo = nicvf_get_drvinfo, .get_msglevel = nicvf_get_msglevel, .set_msglevel = nicvf_set_msglevel, diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 7f709cbdcd87..dde8dc720cd3 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -1057,6 +1057,7 @@ int nicvf_stop(struct net_device *netdev) netif_carrier_off(netdev); netif_tx_stop_all_queues(nic->netdev); + nic->link_up = false; /* Teardown secondary qsets first */ if (!nic->sqs_mode) { @@ -1211,9 +1212,6 @@ int nicvf_open(struct net_device *netdev) nic->drv_stats.txq_stop = 0; nic->drv_stats.txq_wake = 0; - netif_carrier_on(netdev); - netif_tx_start_all_queues(netdev); - return 0; cleanup: nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index e404ea837727..206b6a71a545 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -592,7 +592,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs, /* Set threshold value for interrupt generation */ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh); nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, - qidx, nic->cq_coalesce_usecs); + qidx, CMP_QUEUE_TIMER_THRESH); } /* Configures transmit queue */ diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index fb4957d09914..033e8306e91c 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h @@ -76,7 +76,7 @@ #define CMP_QSIZE CMP_QUEUE_SIZE2 #define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10)) #define CMP_QUEUE_CQE_THRESH 0 -#define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */ +#define CMP_QUEUE_TIMER_THRESH 80 /* ~2usec */ #define RBDR_SIZE RBDR_SIZE0 #define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13)) diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 180aa9fabf48..9df26c2263bc 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -186,6 +186,23 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac) } EXPORT_SYMBOL(bgx_set_lmac_mac); +void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable) +{ + struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + u64 cfg; + + if (!bgx) + return; + + cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); + if (enable) + cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN; + else + cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN); + bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg); +} +EXPORT_SYMBOL(bgx_lmac_rx_tx_enable); + static void bgx_sgmii_change_link_state(struct lmac *lmac) { struct bgx *bgx = lmac->bgx; @@ -612,6 +629,8 @@ static void bgx_poll_for_link(struct work_struct *work) lmac->last_duplex = 1; } else { lmac->link_up = 0; + lmac->last_speed = SPEED_UNKNOWN; + lmac->last_duplex = DUPLEX_UNKNOWN; } if (lmac->last_link != lmac->link_up) { @@ -654,8 +673,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) } /* Enable lmac */ - bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, - CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN); + bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN); /* Restore 
default cfg, in case low level firmware changed it */ bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03); @@ -695,8 +713,7 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid) lmac = &bgx->lmac[lmacid]; if (lmac->check_link) { /* Destroy work queue */ - cancel_delayed_work(&lmac->dwork); - flush_workqueue(lmac->check_link); + cancel_delayed_work_sync(&lmac->dwork); destroy_workqueue(lmac->check_link); } @@ -1009,6 +1026,9 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct bgx *bgx = NULL; u8 lmac; + /* Load octeon mdio driver */ + octeon_mdiobus_force_mod_depencency(); + bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL); if (!bgx) return -ENOMEM; diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h index 07b7ec66c60d..149e179363a1 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h @@ -182,6 +182,8 @@ enum MCAST_MODE { #define BCAST_ACCEPT 1 #define CAM_ACCEPT 1 +void octeon_mdiobus_force_mod_depencency(void); +void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable); void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac); unsigned bgx_get_map(int node); int bgx_get_lmac_count(int node, int bgx); diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index ed41559bae77..b553409e04ad 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -98,8 +98,7 @@ static int csr0 = 0x01A00000 | 0x4800; #elif defined(__mips__) static int csr0 = 0x00200000 | 0x4000; #else -#warning Processor architecture undefined! -static int csr0 = 0x00A00000 | 0x4800; +static int csr0; #endif /* Operational parameters that usually are not changed. */ @@ -1982,6 +1981,12 @@ static int __init tulip_init (void) pr_info("%s", version); #endif + if (!csr0) { + pr_warn("tulip: unknown CPU architecture, using default csr0\n"); + /* default to 8 longword cache line alignment */ + csr0 = 0x00A00000 | 0x4800; + } + /* copy module parms into globals */ tulip_rx_copybreak = rx_copybreak; tulip_max_interrupt_work = max_interrupt_work; diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index 9beb3d34d4ba..3c0e4d5c5fef 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -907,7 +907,7 @@ static void init_registers(struct net_device *dev) #elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) || defined(CONFIG_ARM) i |= 0x4800; #else -#warning Processor architecture undefined + dev_warn(&dev->dev, "unknown CPU architecture, using default csr0 setting\n"); i |= 0x4800; #endif iowrite32(i, ioaddr + PCIBusCfg); diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index ff76d4e9dc1b..bee32a9d9876 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -7,7 +7,8 @@ config NET_VENDOR_FREESCALE default y depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \ M523x || M527x || M5272 || M528x || M520x || M532x || \ - ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) + ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \ + ARCH_LAYERSCAPE ---help--- If you have a network (Ethernet) card belonging to this class, say Y.
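Both Thunder hunks above (nic_remove() in nic_main.c and bgx_lmac_disable() in thunder_bgx.c) switch from the cancel_delayed_work()/flush_workqueue() pair to cancel_delayed_work_sync(). The work items in question re-arm themselves, so a plain cancel can race with an in-flight instance that queues itself again before the workqueue is destroyed. A minimal sketch of the pattern and its safe teardown, using illustrative names rather than the drivers' own:

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct link_poll {
	struct workqueue_struct *wq;
	struct delayed_work dwork;
};

static void link_poll_fn(struct work_struct *work)
{
	struct link_poll *lp = container_of(work, struct link_poll,
					    dwork.work);

	/* ... sample link state, notify interested parties ... */

	/* The handler re-arms itself; this self-queueing is what makes
	 * cancel_delayed_work() + flush_workqueue() racy on teardown.
	 */
	queue_delayed_work(lp->wq, &lp->dwork, HZ / 2);
}

static int link_poll_start(struct link_poll *lp)
{
	lp->wq = alloc_workqueue("link_poll", 0, 0);
	if (!lp->wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&lp->dwork, link_poll_fn);
	queue_delayed_work(lp->wq, &lp->dwork, HZ / 2);
	return 0;
}

static void link_poll_stop(struct link_poll *lp)
{
	/* Waits for any running instance and prevents it from
	 * re-arming, so the queue is idle before destroy_workqueue().
	 */
	cancel_delayed_work_sync(&lp->dwork);
	destroy_workqueue(lp->wq);
}

The same reasoning applies to both call sites above, where the polling work drives link-state updates and mailbox notifications.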
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 3e6b9b437497..7cf898455e60 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -647,9 +647,9 @@ static int gfar_parse_group(struct device_node *np, if (model && strcasecmp(model, "FEC")) { gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1); gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2); - if (gfar_irq(grp, TX)->irq == NO_IRQ || - gfar_irq(grp, RX)->irq == NO_IRQ || - gfar_irq(grp, ER)->irq == NO_IRQ) + if (!gfar_irq(grp, TX)->irq || + !gfar_irq(grp, RX)->irq || + !gfar_irq(grp, ER)->irq) return -EINVAL; } diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index 664d0c261269..b40fba929d65 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -467,7 +467,7 @@ static int gianfar_ptp_probe(struct platform_device *dev) etsects->irq = platform_get_irq(dev, 0); - if (etsects->irq == NO_IRQ) { + if (etsects->irq < 0) { pr_err("irq not in device tree\n"); goto no_node; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 639263d5e833..7781e80896a6 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -627,8 +627,10 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev) /* verify the skb head is not shared */ err = skb_cow_head(skb, 0); - if (err) + if (err) { + dev_kfree_skb(skb); return NETDEV_TX_OK; + } /* locate vlan header */ vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index e84c7f2634d3..ed622fa29dfa 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -36,7 +36,7 @@ /* Registers */ #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2)) -#define MVNETA_RXQ_HW_BUF_ALLOC BIT(1) +#define MVNETA_RXQ_HW_BUF_ALLOC BIT(0) #define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8) #define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8) #define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2)) @@ -62,6 +62,7 @@ #define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3)) #define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2)) #define MVNETA_BASE_ADDR_ENABLE 0x2290 +#define MVNETA_ACCESS_PROTECT_ENABLE 0x2294 #define MVNETA_PORT_CONFIG 0x2400 #define MVNETA_UNI_PROMISC_MODE BIT(0) #define MVNETA_DEF_RXQ(q) ((q) << 1) @@ -159,7 +160,7 @@ #define MVNETA_INTR_ENABLE 0x25b8 #define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00 -#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000 // note: neta says it's 0x000000FF +#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0x000000ff #define MVNETA_RXQ_CMD 0x2680 #define MVNETA_RXQ_DISABLE_SHIFT 8 @@ -242,6 +243,7 @@ #define MVNETA_VLAN_TAG_LEN 4 #define MVNETA_CPU_D_CACHE_LINE_SIZE 32 +#define MVNETA_TX_CSUM_DEF_SIZE 1600 #define MVNETA_TX_CSUM_MAX_SIZE 9800 #define MVNETA_ACC_MODE_EXT 1 @@ -1579,12 +1581,16 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, } skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size); - if (!skb) - goto err_drop_frame; + /* After refill old buffer has to be unmapped regardless + * the skb is successfully built or not. 
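+	 * Unmapping before the !skb check ensures that a failed
+	 * build_skb() no longer leaks the DMA mapping of the
+	 * dropped buffer.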
+ */ dma_unmap_single(dev->dev.parent, phys_addr, MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); + if (!skb) + goto err_drop_frame; + rcvd_pkts++; rcvd_bytes += rx_bytes; @@ -3191,6 +3197,7 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp, } mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); + mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect); } /* Power up the port */ @@ -3250,6 +3257,7 @@ static int mvneta_probe(struct platform_device *pdev) char hw_mac_addr[ETH_ALEN]; const char *mac_from; const char *managed; + int tx_csum_limit; int phy_mode; int err; int cpu; @@ -3350,8 +3358,21 @@ static int mvneta_probe(struct platform_device *pdev) } } - if (of_device_is_compatible(dn, "marvell,armada-370-neta")) - pp->tx_csum_limit = 1600; + if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) { + if (tx_csum_limit < 0 || + tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) { + tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE; + dev_info(&pdev->dev, + "Wrong TX csum limit in DT, set to %dB\n", + MVNETA_TX_CSUM_DEF_SIZE); + } + } else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) { + tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE; + } else { + tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE; + } + + pp->tx_csum_limit = tx_csum_limit; pp->tx_ring_size = MVNETA_MAX_TXD; pp->rx_ring_size = MVNETA_MAX_RXD; diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index b159ef8303cc..057665180f13 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1326,7 +1326,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) /* Get platform resources */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); - if ((!res) || (irq < 0) || (irq >= NR_IRQS)) { + if (!res || irq < 0) { dev_err(&pdev->dev, "error getting resources.\n"); ret = -ENXIO; goto err_exit; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index ee8d1ec61fab..ed5da4d47668 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1225,7 +1225,7 @@ static int ravb_open(struct net_device *ndev) /* Device init */ error = ravb_dmac_init(ndev); if (error) - goto out_free_irq; + goto out_free_irq2; ravb_emac_init(ndev); /* Initialise PTP Clock driver */ @@ -1243,9 +1243,11 @@ static int ravb_open(struct net_device *ndev) out_ptp_stop: /* Stop PTP Clock driver */ ravb_ptp_stop(ndev); +out_free_irq2: + if (priv->chip_id == RCAR_GEN3) + free_irq(priv->emac_irq, ndev); out_free_irq: free_irq(ndev->irq, ndev); - free_irq(priv->emac_irq, ndev); out_napi_off: napi_disable(&priv->napi[RAVB_NC]); napi_disable(&priv->napi[RAVB_BE]); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c index 7f6f4a4fcc70..58c05acc2aab 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c @@ -299,16 +299,17 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac, if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) { const char *rs; + dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN; + err = of_property_read_string(np, "st,tx-retime-src", &rs); if (err < 0) { dev_warn(dev, "Use internal clock source\n"); - dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN; - } else if (!strcasecmp(rs, "clk_125")) { - dwmac->tx_retime_src = TX_RETIME_SRC_CLK_125; - } else if (!strcasecmp(rs, "txclk")) { - dwmac->tx_retime_src = TX_RETIME_SRC_TXCLK; + } else { + if 
(!strcasecmp(rs, "clk_125")) + dwmac->tx_retime_src = TX_RETIME_SRC_CLK_125; + else if (!strcasecmp(rs, "txclk")) + dwmac->tx_retime_src = TX_RETIME_SRC_TXCLK; } - dwmac->speed = SPEED_1000; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 64d8aa4e0cad..3c6549aee11d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -185,7 +185,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv) priv->clk_csr = STMMAC_CSR_100_150M; else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M)) priv->clk_csr = STMMAC_CSR_150_250M; - else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M)) + else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M)) priv->clk_csr = STMMAC_CSR_250_300M; } } @@ -2232,6 +2232,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) frame_len = priv->hw->desc->get_rx_frame_len(p, coe); + /* check if frame_len fits the preallocated memory */ + if (frame_len > priv->dma_buf_sz) { + priv->dev->stats.rx_length_errors++; + break; + } + /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 * Type frames (LLC/LLC-SNAP) */ @@ -3102,6 +3108,7 @@ int stmmac_resume(struct net_device *ndev) init_dma_desc_rings(ndev, GFP_ATOMIC); stmmac_hw_setup(ndev, false); stmmac_init_tx_coalesce(priv); + stmmac_set_rx_mode(ndev); napi_enable(&priv->napi); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index ebf6abc4853f..bba670c42e37 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -138,7 +138,6 @@ int stmmac_mdio_reset(struct mii_bus *bus) #ifdef CONFIG_OF if (priv->device->of_node) { - int reset_gpio, active_low; if (data->reset_gpio < 0) { struct device_node *np = priv->device->of_node; @@ -154,24 +153,23 @@ int stmmac_mdio_reset(struct mii_bus *bus) "snps,reset-active-low"); of_property_read_u32_array(np, "snps,reset-delays-us", data->delays, 3); - } - reset_gpio = data->reset_gpio; - active_low = data->active_low; + if (gpio_request(data->reset_gpio, "mdio-reset")) + return 0; + } - if (!gpio_request(reset_gpio, "mdio-reset")) { - gpio_direction_output(reset_gpio, active_low ? 1 : 0); - if (data->delays[0]) - msleep(DIV_ROUND_UP(data->delays[0], 1000)); + gpio_direction_output(data->reset_gpio, + data->active_low ? 1 : 0); + if (data->delays[0]) + msleep(DIV_ROUND_UP(data->delays[0], 1000)); - gpio_set_value(reset_gpio, active_low ? 0 : 1); - if (data->delays[1]) - msleep(DIV_ROUND_UP(data->delays[1], 1000)); + gpio_set_value(data->reset_gpio, data->active_low ? 0 : 1); + if (data->delays[1]) + msleep(DIV_ROUND_UP(data->delays[1], 1000)); - gpio_set_value(reset_gpio, active_low ? 1 : 0); - if (data->delays[2]) - msleep(DIV_ROUND_UP(data->delays[2], 1000)); - } + gpio_set_value(data->reset_gpio, data->active_low ? 
1 : 0); + if (data->delays[2]) + msleep(DIV_ROUND_UP(data->delays[2], 1000)); } #endif diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c index c08be62bceba..1562ab4151e1 100644 --- a/drivers/net/ethernet/ti/cpsw-common.c +++ b/drivers/net/ethernet/ti/cpsw-common.c @@ -78,6 +78,9 @@ static int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave, int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr) { + if (of_machine_is_compatible("ti,dm8148")) + return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr); + if (of_machine_is_compatible("ti,am33xx")) return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr); diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 54036ae0a388..0fc521941c71 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -498,7 +498,7 @@ static void macvtap_sock_write_space(struct sock *sk) wait_queue_head_t *wqueue; if (!sock_writeable(sk) || - !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags)) + !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags)) return; wqueue = sk_sleep(sk); @@ -585,7 +585,7 @@ static unsigned int macvtap_poll(struct file *file, poll_table * wait) mask |= POLLIN | POLLRDNORM; if (sock_writeable(&q->sk) || - (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) && + (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) && sock_writeable(&q->sk))) mask |= POLLOUT | POLLWRNORM; diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 07a6119121c3..3ce5d9514623 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -614,7 +614,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = { { PHY_ID_BCM5461, 0xfffffff0 }, { PHY_ID_BCM54616S, 0xfffffff0 }, { PHY_ID_BCM5464, 0xfffffff0 }, - { PHY_ID_BCM5482, 0xfffffff0 }, + { PHY_ID_BCM5481, 0xfffffff0 }, { PHY_ID_BCM5482, 0xfffffff0 }, { PHY_ID_BCM50610, 0xfffffff0 }, { PHY_ID_BCM50610M, 0xfffffff0 }, diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 48ce6ef400fe..47cd306dbb3c 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -448,7 +448,8 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) mdiobus_write(phydev->bus, mii_data->phy_id, mii_data->reg_num, val); - if (mii_data->reg_num == MII_BMCR && + if (mii_data->phy_id == phydev->addr && + mii_data->reg_num == MII_BMCR && val & BMCR_RESET) return phy_init_hw(phydev); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index b1878faea397..f0db770e8b2f 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1040,7 +1040,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait) mask |= POLLIN | POLLRDNORM; if (sock_writeable(sk) || - (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) && + (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && sock_writeable(sk))) mask |= POLLOUT | POLLWRNORM; @@ -1488,7 +1488,7 @@ static void tun_sock_write_space(struct sock *sk) if (!sock_writeable(sk)) return; - if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags)) + if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags)) return; wqueue = sk_sleep(sk); diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index a187f08113ec..3b1ba8237768 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -691,7 +691,6 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx) int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int 
drvflags) { - const struct usb_cdc_union_desc *union_desc = NULL; struct cdc_ncm_ctx *ctx; struct usb_driver *driver; u8 *buf; @@ -725,15 +724,16 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ /* parse through descriptors associated with control interface */ cdc_parse_cdc_header(&hdr, intf, buf, len); - ctx->data = usb_ifnum_to_if(dev->udev, - hdr.usb_cdc_union_desc->bSlaveInterface0); + if (hdr.usb_cdc_union_desc) + ctx->data = usb_ifnum_to_if(dev->udev, + hdr.usb_cdc_union_desc->bSlaveInterface0); ctx->ether_desc = hdr.usb_cdc_ether_desc; ctx->func_desc = hdr.usb_cdc_ncm_desc; ctx->mbim_desc = hdr.usb_cdc_mbim_desc; ctx->mbim_extended_desc = hdr.usb_cdc_mbim_extended_desc; /* some buggy devices have an IAD but no CDC Union */ - if (!union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) { + if (!hdr.usb_cdc_union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) { ctx->data = usb_ifnum_to_if(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber + 1); dev_dbg(&intf->dev, "CDC Union missing - got slave from IAD\n"); } diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 34799eaace41..9a5be8b85186 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -725,6 +725,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ + {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */ {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */ {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */ {QMI_FIXED_INTF(0x0b3c, 0xc002, 4)}, /* Olivetti Olicard 140 */ diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 899ea4288197..417903715437 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -587,6 +587,12 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, &adapter->pdev->dev, rbi->skb->data, rbi->len, PCI_DMA_FROMDEVICE); + if (dma_mapping_error(&adapter->pdev->dev, + rbi->dma_addr)) { + dev_kfree_skb_any(rbi->skb); + rq->stats.rx_buf_alloc_failure++; + break; + } } else { /* rx buffer skipped by the device */ } @@ -605,13 +611,18 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, &adapter->pdev->dev, rbi->page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE); + if (dma_mapping_error(&adapter->pdev->dev, + rbi->dma_addr)) { + put_page(rbi->page); + rq->stats.rx_buf_alloc_failure++; + break; + } } else { /* rx buffers skipped by the device */ } val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT; } - BUG_ON(rbi->dma_addr == 0); gd->rxd.addr = cpu_to_le64(rbi->dma_addr); gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT) | val | rbi->len); @@ -655,7 +666,7 @@ vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd, } -static void +static int vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, struct vmxnet3_tx_queue *tq, struct pci_dev *pdev, struct vmxnet3_adapter *adapter) @@ -715,6 +726,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, tbi->dma_addr = dma_map_single(&adapter->pdev->dev, skb->data + buf_offset, buf_size, PCI_DMA_TODEVICE); + if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) + return -EFAULT; tbi->len = buf_size; @@ -755,6 +768,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, 
tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag, buf_offset, buf_size, DMA_TO_DEVICE); + if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) + return -EFAULT; tbi->len = buf_size; @@ -782,6 +797,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, /* set the last buf_info for the pkt */ tbi->skb = skb; tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base; + + return 0; } @@ -1020,7 +1037,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, } /* fill tx descs related to addr & len */ - vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter); + if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter)) + goto unlock_drop_pkt; /* setup the EOP desc */ ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP); @@ -1231,6 +1249,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, struct vmxnet3_rx_buf_info *rbi; struct sk_buff *skb, *new_skb = NULL; struct page *new_page = NULL; + dma_addr_t new_dma_addr; int num_to_alloc; struct Vmxnet3_RxDesc *rxd; u32 idx, ring_idx; @@ -1287,6 +1306,21 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, skip_page_frags = true; goto rcd_done; } + new_dma_addr = dma_map_single(&adapter->pdev->dev, + new_skb->data, rbi->len, + PCI_DMA_FROMDEVICE); + if (dma_mapping_error(&adapter->pdev->dev, + new_dma_addr)) { + dev_kfree_skb(new_skb); + /* Skb allocation failed, do not handover this + * skb to stack. Reuse it. Drop the existing pkt + */ + rq->stats.rx_buf_alloc_failure++; + ctx->skb = NULL; + rq->stats.drop_total++; + skip_page_frags = true; + goto rcd_done; + } dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr, rbi->len, @@ -1303,9 +1337,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, /* Immediate refill */ rbi->skb = new_skb; - rbi->dma_addr = dma_map_single(&adapter->pdev->dev, - rbi->skb->data, rbi->len, - PCI_DMA_FROMDEVICE); + rbi->dma_addr = new_dma_addr; rxd->addr = cpu_to_le64(rbi->dma_addr); rxd->len = rbi->len; if (adapter->version == 2 && @@ -1348,6 +1380,19 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, skip_page_frags = true; goto rcd_done; } + new_dma_addr = dma_map_page(&adapter->pdev->dev + , rbi->page, + 0, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + if (dma_mapping_error(&adapter->pdev->dev, + new_dma_addr)) { + put_page(new_page); + rq->stats.rx_buf_alloc_failure++; + dev_kfree_skb(ctx->skb); + ctx->skb = NULL; + skip_page_frags = true; + goto rcd_done; + } dma_unmap_page(&adapter->pdev->dev, rbi->dma_addr, rbi->len, @@ -1357,10 +1402,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, /* Immediate refill */ rbi->page = new_page; - rbi->dma_addr = dma_map_page(&adapter->pdev->dev - , rbi->page, - 0, PAGE_SIZE, - PCI_DMA_FROMDEVICE); + rbi->dma_addr = new_dma_addr; rxd->addr = cpu_to_le64(rbi->dma_addr); rxd->len = rbi->len; } @@ -2167,7 +2209,8 @@ vmxnet3_set_mc(struct net_device *netdev) PCI_DMA_TODEVICE); } - if (new_table_pa) { + if (!dma_mapping_error(&adapter->pdev->dev, + new_table_pa)) { new_mode |= VMXNET3_RXM_MCAST; rxConf->mfTablePA = cpu_to_le64(new_table_pa); } else { @@ -3075,6 +3118,11 @@ vmxnet3_probe_device(struct pci_dev *pdev, adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter, sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE); + if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) { + dev_err(&pdev->dev, "Failed to map dma\n"); + err = -EFAULT; + goto err_dma_map; + } adapter->shared = dma_alloc_coherent( &adapter->pdev->dev, sizeof(struct Vmxnet3_DriverShared), @@ -3233,6 +3281,7 @@ err_alloc_queue_desc: 
err_alloc_shared: dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE); +err_dma_map: free_netdev(netdev); return err; } diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 92fa3e1ea65c..4f9748457f5a 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -907,7 +907,6 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct net_vrf *vrf = netdev_priv(dev); - int err; if (!data || !data[IFLA_VRF_TABLE]) return -EINVAL; @@ -916,15 +915,7 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev, dev->priv_flags |= IFF_L3MDEV_MASTER; - err = register_netdevice(dev); - if (err < 0) - goto out_fail; - - return 0; - -out_fail: - free_netdev(dev); - return err; + return register_netdevice(dev); } static size_t vrf_nl_getsize(const struct net_device *dev) diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index e92aaf615901..89541cc90e87 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -1075,11 +1075,10 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) used = pvc_is_used(pvc); - if (type == ARPHRD_ETHER) { + if (type == ARPHRD_ETHER) dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN, ether_setup); - dev->priv_flags &= ~IFF_TX_SKB_SHARING; - } else + else dev = alloc_netdev(0, "pvc%d", NET_NAME_UNKNOWN, pvc_setup); if (!dev) { @@ -1088,9 +1087,10 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) return -ENOBUFS; } - if (type == ARPHRD_ETHER) + if (type == ARPHRD_ETHER) { + dev->priv_flags &= ~IFF_TX_SKB_SHARING; eth_hw_addr_random(dev); - else { + } else { *(__be16*)dev->dev_addr = htons(dlci); dlci_to_q922(dev->broadcast, dlci); } diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c index 5c47b011a9d7..cd39025d2abf 100644 --- a/drivers/net/wan/x25_asy.c +++ b/drivers/net/wan/x25_asy.c @@ -549,16 +549,12 @@ static void x25_asy_receive_buf(struct tty_struct *tty, static int x25_asy_open_tty(struct tty_struct *tty) { - struct x25_asy *sl = tty->disc_data; + struct x25_asy *sl; int err; if (tty->ops->write == NULL) return -EOPNOTSUPP; - /* First make sure we're not already connected. */ - if (sl && sl->magic == X25_ASY_MAGIC) - return -EEXIST; - /* OK. Find a free X.25 channel to use. 
*/ sl = x25_asy_alloc(); if (sl == NULL) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index aa9bd92ac4ed..0947cc271e69 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -51,6 +51,7 @@ MODULE_PARM_DESC(rawmode, "Use raw 802.11 frame datapath"); static const struct ath10k_hw_params ath10k_hw_params_list[] = { { .id = QCA988X_HW_2_0_VERSION, + .dev_id = QCA988X_2_0_DEVICE_ID, .name = "qca988x hw2.0", .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR, .uart_pin = 7, @@ -69,6 +70,25 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { }, { .id = QCA6174_HW_2_1_VERSION, + .dev_id = QCA6164_2_1_DEVICE_ID, + .name = "qca6164 hw2.1", + .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR, + .uart_pin = 6, + .otp_exe_param = 0, + .channel_counters_freq_hz = 88000, + .max_probe_resp_desc_thres = 0, + .fw = { + .dir = QCA6174_HW_2_1_FW_DIR, + .fw = QCA6174_HW_2_1_FW_FILE, + .otp = QCA6174_HW_2_1_OTP_FILE, + .board = QCA6174_HW_2_1_BOARD_DATA_FILE, + .board_size = QCA6174_BOARD_DATA_SZ, + .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, + }, + }, + { + .id = QCA6174_HW_2_1_VERSION, + .dev_id = QCA6174_2_1_DEVICE_ID, .name = "qca6174 hw2.1", .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR, .uart_pin = 6, @@ -86,6 +106,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { }, { .id = QCA6174_HW_3_0_VERSION, + .dev_id = QCA6174_2_1_DEVICE_ID, .name = "qca6174 hw3.0", .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, .uart_pin = 6, @@ -103,6 +124,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { }, { .id = QCA6174_HW_3_2_VERSION, + .dev_id = QCA6174_2_1_DEVICE_ID, .name = "qca6174 hw3.2", .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, .uart_pin = 6, @@ -121,6 +143,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { }, { .id = QCA99X0_HW_2_0_DEV_VERSION, + .dev_id = QCA99X0_2_0_DEVICE_ID, .name = "qca99x0 hw2.0", .patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR, .uart_pin = 7, @@ -139,10 +162,31 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { }, { .id = QCA9377_HW_1_0_DEV_VERSION, + .dev_id = QCA9377_1_0_DEVICE_ID, .name = "qca9377 hw1.0", .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR, - .uart_pin = 7, + .uart_pin = 6, .otp_exe_param = 0, + .channel_counters_freq_hz = 88000, + .max_probe_resp_desc_thres = 0, + .fw = { + .dir = QCA9377_HW_1_0_FW_DIR, + .fw = QCA9377_HW_1_0_FW_FILE, + .otp = QCA9377_HW_1_0_OTP_FILE, + .board = QCA9377_HW_1_0_BOARD_DATA_FILE, + .board_size = QCA9377_BOARD_DATA_SZ, + .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ, + }, + }, + { + .id = QCA9377_HW_1_1_DEV_VERSION, + .dev_id = QCA9377_1_0_DEVICE_ID, + .name = "qca9377 hw1.1", + .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR, + .uart_pin = 6, + .otp_exe_param = 0, + .channel_counters_freq_hz = 88000, + .max_probe_resp_desc_thres = 0, .fw = { .dir = QCA9377_HW_1_0_FW_DIR, .fw = QCA9377_HW_1_0_FW_FILE, @@ -1263,7 +1307,8 @@ static int ath10k_init_hw_params(struct ath10k *ar) for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) { hw_params = &ath10k_hw_params_list[i]; - if (hw_params->id == ar->target_version) + if (hw_params->id == ar->target_version && + hw_params->dev_id == ar->dev_id) break; } diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 018c64f4fd25..858d75f49a9f 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -636,6 +636,7 @@ struct 
ath10k { struct ath10k_hw_params { u32 id; + u16 dev_id; const char *name; u32 patch_load_addr; int uart_pin; diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 39966a05c1cc..713c2bcea178 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -22,6 +22,12 @@ #define ATH10K_FW_DIR "ath10k" +#define QCA988X_2_0_DEVICE_ID (0x003c) +#define QCA6164_2_1_DEVICE_ID (0x0041) +#define QCA6174_2_1_DEVICE_ID (0x003e) +#define QCA99X0_2_0_DEVICE_ID (0x0040) +#define QCA9377_1_0_DEVICE_ID (0x0042) + /* QCA988X 1.0 definitions (unsupported) */ #define QCA988X_HW_1_0_CHIP_ID_REV 0x0 @@ -42,6 +48,10 @@ #define QCA6174_HW_3_0_VERSION 0x05020000 #define QCA6174_HW_3_2_VERSION 0x05030000 +/* QCA9377 target BMI version signatures */ +#define QCA9377_HW_1_0_DEV_VERSION 0x05020000 +#define QCA9377_HW_1_1_DEV_VERSION 0x05020001 + enum qca6174_pci_rev { QCA6174_PCI_REV_1_1 = 0x11, QCA6174_PCI_REV_1_3 = 0x13, @@ -60,6 +70,11 @@ enum qca6174_chip_id_rev { QCA6174_HW_3_2_CHIP_ID_REV = 10, }; +enum qca9377_chip_id_rev { + QCA9377_HW_1_0_CHIP_ID_REV = 0x0, + QCA9377_HW_1_1_CHIP_ID_REV = 0x1, +}; + #define QCA6174_HW_2_1_FW_DIR "ath10k/QCA6174/hw2.1" #define QCA6174_HW_2_1_FW_FILE "firmware.bin" #define QCA6174_HW_2_1_OTP_FILE "otp.bin" @@ -85,8 +100,6 @@ enum qca6174_chip_id_rev { #define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234 /* QCA9377 1.0 definitions */ -#define QCA9377_HW_1_0_DEV_VERSION 0x05020001 -#define QCA9377_HW_1_0_CHIP_ID_REV 0x1 #define QCA9377_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9377/hw1.0" #define QCA9377_HW_1_0_FW_FILE "firmware.bin" #define QCA9377_HW_1_0_OTP_FILE "otp.bin" diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index a7411fe90cc4..95a55405ebf0 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -4225,7 +4225,7 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed) static u32 get_nss_from_chainmask(u16 chain_mask) { - if ((chain_mask & 0x15) == 0x15) + if ((chain_mask & 0xf) == 0xf) return 4; else if ((chain_mask & 0x7) == 0x7) return 3; diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 3fca200b986c..930785a724e1 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -57,12 +57,6 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)"); #define ATH10K_PCI_TARGET_WAIT 3000 #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3 -#define QCA988X_2_0_DEVICE_ID (0x003c) -#define QCA6164_2_1_DEVICE_ID (0x0041) -#define QCA6174_2_1_DEVICE_ID (0x003e) -#define QCA99X0_2_0_DEVICE_ID (0x0040) -#define QCA9377_1_0_DEVICE_ID (0x0042) - static const struct pci_device_id ath10k_pci_id_table[] = { { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */ { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */ @@ -92,7 +86,9 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = { { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV }, { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV }, + { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV }, + { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV }, }; static void ath10k_pci_buffer_cleanup(struct ath10k *ar); @@ -111,8 +107,9 @@ static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state); static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state); static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state); static void 
ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state); +static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state); -static const struct ce_attr host_ce_config_wlan[] = { +static struct ce_attr host_ce_config_wlan[] = { /* CE0: host->target HTC control and raw streams */ { .flags = CE_ATTR_FLAGS, @@ -128,7 +125,7 @@ static const struct ce_attr host_ce_config_wlan[] = { .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 512, - .recv_cb = ath10k_pci_htc_rx_cb, + .recv_cb = ath10k_pci_htt_htc_rx_cb, }, /* CE2: target->host WMI */ @@ -217,7 +214,7 @@ static const struct ce_attr host_ce_config_wlan[] = { }; /* Target firmware's Copy Engine configuration. */ -static const struct ce_pipe_config target_ce_config_wlan[] = { +static struct ce_pipe_config target_ce_config_wlan[] = { /* CE0: host->target HTC control and raw streams */ { .pipenum = __cpu_to_le32(0), @@ -330,7 +327,7 @@ static const struct ce_pipe_config target_ce_config_wlan[] = { * This table is derived from the CE_PCI TABLE, above. * It is passed to the Target at startup for use by firmware. */ -static const struct service_to_pipe target_service_to_ce_map_wlan[] = { +static struct service_to_pipe target_service_to_ce_map_wlan[] = { { __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ @@ -1208,6 +1205,16 @@ static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state) ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); } +static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state) +{ + /* CE4 polling needs to be done whenever CE pipe which transports + * HTT Rx (target->host) is processed. + */ + ath10k_ce_per_engine_service(ce_state->ar, 4); + + ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); +} + /* Called by lower (CE) layer when a send to HTT Target completes. */ static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state) { @@ -2027,6 +2034,29 @@ static int ath10k_pci_init_config(struct ath10k *ar) return 0; } +static void ath10k_pci_override_ce_config(struct ath10k *ar) +{ + struct ce_attr *attr; + struct ce_pipe_config *config; + + /* For QCA6174 we're overriding the Copy Engine 5 configuration, + * since it is currently used for other feature. 
+ */ + + /* Override Host's Copy Engine 5 configuration */ + attr = &host_ce_config_wlan[5]; + attr->src_sz_max = 0; + attr->dest_nentries = 0; + + /* Override Target firmware's Copy Engine configuration */ + config = &target_ce_config_wlan[5]; + config->pipedir = __cpu_to_le32(PIPEDIR_OUT); + config->nbytes_max = __cpu_to_le32(2048); + + /* Map from service/endpoint to Copy Engine */ + target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1); +} + static int ath10k_pci_alloc_pipes(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); @@ -3020,6 +3050,9 @@ static int ath10k_pci_probe(struct pci_dev *pdev, goto err_core_destroy; } + if (QCA_REV_6174(ar)) + ath10k_pci_override_ce_config(ar); + ret = ath10k_pci_alloc_pipes(ar); if (ret) { ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index 1a73c7a1da77..bf88ec3a65fa 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -69,7 +69,7 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL7260_UCODE_API_MAX 17 +#define IWL7260_UCODE_API_MAX 19 /* Oldest version we won't warn about */ #define IWL7260_UCODE_API_OK 13 diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c index 0116e5a4c393..9bcc0bf937d8 100644 --- a/drivers/net/wireless/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/iwlwifi/iwl-8000.c @@ -69,7 +69,7 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL8000_UCODE_API_MAX 17 +#define IWL8000_UCODE_API_MAX 19 /* Oldest version we won't warn about */ #define IWL8000_UCODE_API_OK 13 diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index 85ae902df7c0..29ae58ebf223 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c @@ -309,9 +309,9 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, * to transmit packets to the AP, i.e. the PTK. */ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { - key->hw_key_idx = 0; mvm->ptk_ivlen = key->iv_len; mvm->ptk_icvlen = key->icv_len; + ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0); } else { /* * firmware only supports TSC/RSC for a single key, @@ -319,12 +319,11 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, * with new ones -- this relies on mac80211 doing * list_add_tail(). 
*/ - key->hw_key_idx = 1; mvm->gtk_ivlen = key->iv_len; mvm->gtk_icvlen = key->icv_len; + ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1); } - ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, true); data->error = ret != 0; out_unlock: mutex_unlock(&mvm->mutex); @@ -772,9 +771,6 @@ static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm) */ set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); - /* We reprogram keys and shouldn't allocate new key indices */ - memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); - mvm->ptk_ivlen = 0; mvm->ptk_icvlen = 0; mvm->ptk_ivlen = 0; diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 1fb684693040..e88afac51c5d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -2941,6 +2941,7 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; + u8 key_offset; if (iwlwifi_mod_params.sw_crypto) { IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n"); @@ -3006,10 +3007,14 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, break; } + /* in HW restart reuse the index, otherwise request a new one */ + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + key_offset = key->hw_key_idx; + else + key_offset = STA_KEY_IDX_INVALID; + IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n"); - ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, - test_bit(IWL_MVM_STATUS_IN_HW_RESTART, - &mvm->status)); + ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset); if (ret) { IWL_WARN(mvm, "set key failed\n"); /* diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c index 300a249486e4..354acbde088e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c @@ -1201,7 +1201,8 @@ static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm) return max_offs; } -static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif, +static u8 iwl_mvm_get_key_sta_id(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -1218,8 +1219,21 @@ static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif, * station ID, then use AP's station ID. */ if (vif->type == NL80211_IFTYPE_STATION && - mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) - return mvmvif->ap_sta_id; + mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { + u8 sta_id = mvmvif->ap_sta_id; + + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], + lockdep_is_held(&mvm->mutex)); + /* + * It is possible that the 'sta' parameter is NULL, + * for example when a GTK is removed - the sta_id will then + * be the AP ID, and no station was passed by mac80211. 
+ */ + if (IS_ERR_OR_NULL(sta)) + return IWL_MVM_STATION_COUNT; + + return sta_id; + } return IWL_MVM_STATION_COUNT; } @@ -1227,7 +1241,8 @@ static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif, static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta, struct ieee80211_key_conf *keyconf, bool mcast, - u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags) + u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags, + u8 key_offset) { struct iwl_mvm_add_sta_key_cmd cmd = {}; __le16 key_flags; @@ -1269,7 +1284,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, if (mcast) key_flags |= cpu_to_le16(STA_KEY_MULTICAST); - cmd.key_offset = keyconf->hw_key_idx; + cmd.key_offset = key_offset; cmd.key_flags = key_flags; cmd.sta_id = sta_id; @@ -1360,6 +1375,7 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *keyconf, + u8 key_offset, bool mcast) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); @@ -1375,17 +1391,17 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, ieee80211_get_key_rx_seq(keyconf, 0, &seq); ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, - seq.tkip.iv32, p1k, 0); + seq.tkip.iv32, p1k, 0, key_offset); break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, - 0, NULL, 0); + 0, NULL, 0, key_offset); break; default: ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, - 0, NULL, 0); + 0, NULL, 0, key_offset); } return ret; @@ -1433,7 +1449,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *keyconf, - bool have_key_offset) + u8 key_offset) { bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); u8 sta_id; @@ -1443,7 +1459,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); /* Get the station id from the mvm local station table */ - sta_id = iwl_mvm_get_key_sta_id(vif, sta); + sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta); if (sta_id == IWL_MVM_STATION_COUNT) { IWL_ERR(mvm, "Failed to find station id\n"); return -EINVAL; @@ -1470,18 +1486,25 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif)) return -EINVAL; - if (!have_key_offset) { - /* - * The D3 firmware hardcodes the PTK offset to 0, so we have to - * configure it there. As a result, this workaround exists to - * let the caller set the key offset (hw_key_idx), see d3.c. - */ - keyconf->hw_key_idx = iwl_mvm_set_fw_key_idx(mvm); - if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID) + /* If the key_offset is not pre-assigned, we need to find a + * new offset to use. In normal cases, the offset is not + * pre-assigned, but during HW_RESTART we want to reuse the + * same indices, so we pass them when this function is called. + * + * In D3 entry, we need to hardcoded the indices (because the + * firmware hardcodes the PTK offset to 0). In this case, we + * need to make sure we don't overwrite the hw_key_idx in the + * keyconf structure, because otherwise we cannot configure + * the original ones back when resuming. 
+ */ + if (key_offset == STA_KEY_IDX_INVALID) { + key_offset = iwl_mvm_set_fw_key_idx(mvm); + if (key_offset == STA_KEY_IDX_INVALID) return -ENOSPC; + keyconf->hw_key_idx = key_offset; } - ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, mcast); + ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast); if (ret) { __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table); goto end; @@ -1495,7 +1518,8 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, */ if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) { - ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, !mcast); + ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, + key_offset, !mcast); if (ret) { __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table); __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); @@ -1521,7 +1545,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); /* Get the station id from the mvm local station table */ - sta_id = iwl_mvm_get_key_sta_id(vif, sta); + sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta); IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id); @@ -1547,24 +1571,6 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, return 0; } - /* - * It is possible that the 'sta' parameter is NULL, and thus - * there is a need to retrieve the sta from the local station table, - * for example when a GTK is removed (where the sta_id will then be - * the AP ID, and no station was passed by mac80211.) - */ - if (!sta) { - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], - lockdep_is_held(&mvm->mutex)); - if (!sta) { - IWL_ERR(mvm, "Invalid station id\n"); - return -EINVAL; - } - } - - if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif)) - return -EINVAL; - ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); if (ret) return ret; @@ -1584,7 +1590,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, u16 *phase1key) { struct iwl_mvm_sta *mvm_sta; - u8 sta_id = iwl_mvm_get_key_sta_id(vif, sta); + u8 sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta); bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT)) @@ -1602,7 +1608,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, mvm_sta = iwl_mvm_sta_from_mac80211(sta); iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, - iv32, phase1key, CMD_ASYNC); + iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx); rcu_read_unlock(); } diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h index eedb215eba3f..0631cc0a6d3c 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/iwlwifi/mvm/sta.h @@ -365,8 +365,8 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm, int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_key_conf *key, - bool have_key_offset); + struct ieee80211_key_conf *keyconf, + u8 key_offset); int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 644b58bc5226..639761fb2bfb 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -423,14 +423,21 @@ static const struct pci_device_id iwl_hw_card_ids[] = { /* 8000 Series */ {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0130, 
iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1130, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0132, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1132, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x01F0, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0012, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1012, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1150, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)}, @@ -438,18 +445,28 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x8110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9132, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x8150, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9150, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0044, iwl8260_2n_cfg)}, {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)}, #endif /* CONFIG_IWLMVM */ {0} diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index 6e9418ed90c2..bbb789f8990b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -2272,7 +2272,7 @@ void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - if (!rtlpci->int_clear) + if (rtlpci->int_clear) rtl8821ae_clear_interrupt(hw);/*clear it here first*/ rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c index 8ee141a55bc5..142bdff4ed60 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c @@ -448,7 +448,7 @@ MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n"); MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the 
watchdog (default 0)\n"); -MODULE_PARM_DESC(int_clear, "Set to 1 to disable interrupt clear before set (default 0)\n"); +MODULE_PARM_DESC(int_clear, "Set to 0 to disable interrupt clear before set (default 1)\n"); static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile index 219dc206fa5f..a5fe23952586 100644 --- a/drivers/nvme/host/Makefile +++ b/drivers/nvme/host/Makefile @@ -1,4 +1,5 @@ obj-$(CONFIG_BLK_DEV_NVME) += nvme.o -nvme-y += pci.o scsi.o lightnvm.o +lightnvm-$(CONFIG_NVM) := lightnvm.o +nvme-y += pci.o scsi.o $(lightnvm-y) diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index e0b7b95813bc..06c336410235 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -22,8 +22,6 @@ #include "nvme.h" -#ifdef CONFIG_NVM - #include <linux/nvme.h> #include <linux/bitops.h> #include <linux/lightnvm.h> @@ -93,7 +91,7 @@ struct nvme_nvm_l2ptbl { __le16 cdw14[6]; }; -struct nvme_nvm_bbtbl { +struct nvme_nvm_getbbtbl { __u8 opcode; __u8 flags; __u16 command_id; @@ -101,10 +99,23 @@ struct nvme_nvm_bbtbl { __u64 rsvd[2]; __le64 prp1; __le64 prp2; - __le32 prp1_len; - __le32 prp2_len; - __le32 lbb; - __u32 rsvd11[3]; + __le64 spba; + __u32 rsvd4[4]; +}; + +struct nvme_nvm_setbbtbl { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le64 rsvd[2]; + __le64 prp1; + __le64 prp2; + __le64 spba; + __le16 nlb; + __u8 value; + __u8 rsvd3; + __u32 rsvd4[3]; }; struct nvme_nvm_erase_blk { @@ -129,8 +140,8 @@ struct nvme_nvm_command { struct nvme_nvm_hb_rw hb_rw; struct nvme_nvm_ph_rw ph_rw; struct nvme_nvm_l2ptbl l2p; - struct nvme_nvm_bbtbl get_bb; - struct nvme_nvm_bbtbl set_bb; + struct nvme_nvm_getbbtbl get_bb; + struct nvme_nvm_setbbtbl set_bb; struct nvme_nvm_erase_blk erase; }; }; @@ -142,11 +153,13 @@ struct nvme_nvm_id_group { __u8 num_ch; __u8 num_lun; __u8 num_pln; + __u8 rsvd1; __le16 num_blk; __le16 num_pg; __le16 fpg_sz; __le16 csecs; __le16 sos; + __le16 rsvd2; __le32 trdt; __le32 trdm; __le32 tprt; @@ -154,8 +167,9 @@ struct nvme_nvm_id_group { __le32 tbet; __le32 tbem; __le32 mpos; + __le32 mccap; __le16 cpar; - __u8 reserved[913]; + __u8 reserved[906]; } __packed; struct nvme_nvm_addr_format { @@ -178,15 +192,28 @@ struct nvme_nvm_id { __u8 ver_id; __u8 vmnt; __u8 cgrps; - __u8 res[5]; + __u8 res; __le32 cap; __le32 dom; struct nvme_nvm_addr_format ppaf; - __u8 ppat; - __u8 resv[223]; + __u8 resv[228]; struct nvme_nvm_id_group groups[4]; } __packed; +struct nvme_nvm_bb_tbl { + __u8 tblid[4]; + __le16 verid; + __le16 revid; + __le32 rvsd1; + __le32 tblks; + __le32 tfact; + __le32 tgrown; + __le32 tdresv; + __le32 thresv; + __le32 rsvd2[8]; + __u8 blk[0]; +}; + /* * Check we didn't inadvertently grow the command struct */ @@ -195,12 +222,14 @@ static inline void _nvme_nvm_check_size(void) BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64); BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64); BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64); - BUILD_BUG_ON(sizeof(struct nvme_nvm_bbtbl) != 64); + BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64); + BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64); BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64); BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64); BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960); BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 128); BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096); + BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 512); } static int 
init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) @@ -234,6 +263,7 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) dst->tbet = le32_to_cpu(src->tbet); dst->tbem = le32_to_cpu(src->tbem); dst->mpos = le32_to_cpu(src->mpos); + dst->mccap = le32_to_cpu(src->mccap); dst->cpar = le16_to_cpu(src->cpar); } @@ -244,6 +274,7 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id) { struct nvme_ns *ns = q->queuedata; + struct nvme_dev *dev = ns->dev; struct nvme_nvm_id *nvme_nvm_id; struct nvme_nvm_command c = {}; int ret; @@ -256,8 +287,8 @@ static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id) if (!nvme_nvm_id) return -ENOMEM; - ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, nvme_nvm_id, - sizeof(struct nvme_nvm_id)); + ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c, + nvme_nvm_id, sizeof(struct nvme_nvm_id)); if (ret) { ret = -EIO; goto out; @@ -268,6 +299,8 @@ static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id) nvm_id->cgrps = nvme_nvm_id->cgrps; nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap); nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom); + memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf, + sizeof(struct nvme_nvm_addr_format)); ret = init_grps(nvm_id, nvme_nvm_id); out: @@ -281,7 +314,7 @@ static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb, struct nvme_ns *ns = q->queuedata; struct nvme_dev *dev = ns->dev; struct nvme_nvm_command c = {}; - u32 len = queue_max_hw_sectors(q) << 9; + u32 len = queue_max_hw_sectors(dev->admin_q) << 9; u32 nlb_pr_rq = len / sizeof(u64); u64 cmd_slba = slba; void *entries; @@ -299,8 +332,8 @@ static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb, c.l2p.slba = cpu_to_le64(cmd_slba); c.l2p.nlb = cpu_to_le32(cmd_nlb); - ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, - entries, len); + ret = nvme_submit_sync_cmd(dev->admin_q, + (struct nvme_command *)&c, entries, len); if (ret) { dev_err(dev->dev, "L2P table transfer failed (%d)\n", ret); @@ -322,43 +355,84 @@ out: return ret; } -static int nvme_nvm_get_bb_tbl(struct request_queue *q, int lunid, - unsigned int nr_blocks, - nvm_bb_update_fn *update_bbtbl, void *priv) +static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa, + int nr_blocks, nvm_bb_update_fn *update_bbtbl, + void *priv) { + struct request_queue *q = nvmdev->q; struct nvme_ns *ns = q->queuedata; struct nvme_dev *dev = ns->dev; struct nvme_nvm_command c = {}; - void *bb_bitmap; - u16 bb_bitmap_size; + struct nvme_nvm_bb_tbl *bb_tbl; + int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks; int ret = 0; c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl; c.get_bb.nsid = cpu_to_le32(ns->ns_id); - c.get_bb.lbb = cpu_to_le32(lunid); - bb_bitmap_size = ((nr_blocks >> 15) + 1) * PAGE_SIZE; - bb_bitmap = kmalloc(bb_bitmap_size, GFP_KERNEL); - if (!bb_bitmap) - return -ENOMEM; + c.get_bb.spba = cpu_to_le64(ppa.ppa); - bitmap_zero(bb_bitmap, nr_blocks); + bb_tbl = kzalloc(tblsz, GFP_KERNEL); + if (!bb_tbl) + return -ENOMEM; - ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, bb_bitmap, - bb_bitmap_size); + ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c, + bb_tbl, tblsz); if (ret) { dev_err(dev->dev, "get bad block table failed (%d)\n", ret); ret = -EIO; goto out; } - ret = update_bbtbl(lunid, bb_bitmap, nr_blocks, priv); + if (bb_tbl->tblid[0] != 'B' || 
bb_tbl->tblid[1] != 'B' || + bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') { + dev_err(dev->dev, "bbt format mismatch\n"); + ret = -EINVAL; + goto out; + } + + if (le16_to_cpu(bb_tbl->verid) != 1) { + ret = -EINVAL; + dev_err(dev->dev, "bbt version not supported\n"); + goto out; + } + + if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) { + ret = -EINVAL; + dev_err(dev->dev, "bbt unsuspected blocks returned (%u!=%u)", + le32_to_cpu(bb_tbl->tblks), nr_blocks); + goto out; + } + + ppa = dev_to_generic_addr(nvmdev, ppa); + ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv); if (ret) { ret = -EINTR; goto out; } out: - kfree(bb_bitmap); + kfree(bb_tbl); + return ret; +} + +static int nvme_nvm_set_bb_tbl(struct request_queue *q, struct nvm_rq *rqd, + int type) +{ + struct nvme_ns *ns = q->queuedata; + struct nvme_dev *dev = ns->dev; + struct nvme_nvm_command c = {}; + int ret = 0; + + c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl; + c.set_bb.nsid = cpu_to_le32(ns->ns_id); + c.set_bb.spba = cpu_to_le64(rqd->ppa_addr.ppa); + c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1); + c.set_bb.value = type; + + ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c, + NULL, 0); + if (ret) + dev_err(dev->dev, "set bad block table failed (%d)\n", ret); return ret; } @@ -474,6 +548,7 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = { .get_l2p_tbl = nvme_nvm_get_l2p_tbl, .get_bb_tbl = nvme_nvm_get_bb_tbl, + .set_bb_tbl = nvme_nvm_set_bb_tbl, .submit_io = nvme_nvm_submit_io, .erase_block = nvme_nvm_erase_block, @@ -496,31 +571,27 @@ void nvme_nvm_unregister(struct request_queue *q, char *disk_name) nvm_unregister(disk_name); } +/* move to shared place when used in multiple places. */ +#define PCI_VENDOR_ID_CNEX 0x1d1d +#define PCI_DEVICE_ID_CNEX_WL 0x2807 +#define PCI_DEVICE_ID_CNEX_QEMU 0x1f1f + int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id) { struct nvme_dev *dev = ns->dev; struct pci_dev *pdev = to_pci_dev(dev->dev); /* QEMU NVMe simulator - PCI ID + Vendor specific bit */ - if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x5845 && + if (pdev->vendor == PCI_VENDOR_ID_CNEX && + pdev->device == PCI_DEVICE_ID_CNEX_QEMU && id->vs[0] == 0x1) return 1; /* CNEX Labs - PCI ID + Vendor specific bit */ - if (pdev->vendor == 0x1d1d && pdev->device == 0x2807 && + if (pdev->vendor == PCI_VENDOR_ID_CNEX && + pdev->device == PCI_DEVICE_ID_CNEX_WL && id->vs[0] == 0x1) return 1; return 0; } -#else -int nvme_nvm_register(struct request_queue *q, char *disk_name) -{ - return 0; -} -void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {}; -int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id) -{ - return 0; -} -#endif /* CONFIG_NVM */ diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index fdb4e5bad9ac..044253dca30a 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -136,8 +136,22 @@ int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr); int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg); int nvme_sg_get_version_num(int __user *ip); +#ifdef CONFIG_NVM int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id); int nvme_nvm_register(struct request_queue *q, char *disk_name); void nvme_nvm_unregister(struct request_queue *q, char *disk_name); +#else +static inline int nvme_nvm_register(struct request_queue *q, char *disk_name) +{ + return 0; +} + +static inline void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {}; + +static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, 
struct nvme_id_ns *id) +{ + return 0; +} +#endif /* CONFIG_NVM */ #endif /* _NVME_H */ diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 8187df204695..9e294ff4e652 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -896,19 +896,28 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, goto retry_cmd; } if (blk_integrity_rq(req)) { - if (blk_rq_count_integrity_sg(req->q, req->bio) != 1) + if (blk_rq_count_integrity_sg(req->q, req->bio) != 1) { + dma_unmap_sg(dev->dev, iod->sg, iod->nents, + dma_dir); goto error_cmd; + } sg_init_table(iod->meta_sg, 1); if (blk_rq_map_integrity_sg( - req->q, req->bio, iod->meta_sg) != 1) + req->q, req->bio, iod->meta_sg) != 1) { + dma_unmap_sg(dev->dev, iod->sg, iod->nents, + dma_dir); goto error_cmd; + } if (rq_data_dir(req)) nvme_dif_remap(req, nvme_dif_prep); - if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) + if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) { + dma_unmap_sg(dev->dev, iod->sg, iod->nents, + dma_dir); goto error_cmd; + } } } @@ -968,7 +977,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag) if (head == nvmeq->cq_head && phase == nvmeq->cq_phase) return; - writel(head, nvmeq->q_db + nvmeq->dev->db_stride); + if (likely(nvmeq->cq_vector >= 0)) + writel(head, nvmeq->q_db + nvmeq->dev->db_stride); nvmeq->cq_head = head; nvmeq->cq_phase = phase; @@ -1727,9 +1737,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) u32 aqa; u64 cap = lo_hi_readq(&dev->bar->cap); struct nvme_queue *nvmeq; - unsigned page_shift = PAGE_SHIFT; + /* + * default to a 4K page size, with the intention to update this + * path in the future to accomodate architectures with differing + * kernel and IO page sizes. + */ + unsigned page_shift = 12; unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12; - unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12; if (page_shift < dev_page_min) { dev_err(dev->dev, @@ -1738,13 +1752,6 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) 1 << page_shift); return -ENODEV; } - if (page_shift > dev_page_max) { - dev_info(dev->dev, - "Device maximum page size (%u) smaller than " - "host (%u); enabling work-around\n", - 1 << dev_page_max, 1 << page_shift); - page_shift = dev_page_max; - } dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ? NVME_CAP_NSSRC(cap) : 0; @@ -2268,7 +2275,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid) if (dev->max_hw_sectors) { blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); blk_queue_max_segments(ns->queue, - ((dev->max_hw_sectors << 9) / dev->page_size) + 1); + (dev->max_hw_sectors / (dev->page_size >> 9)) + 1); } if (dev->stripe_size) blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9); @@ -2701,6 +2708,18 @@ static int nvme_dev_map(struct nvme_dev *dev) dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH); dev->db_stride = 1 << NVME_CAP_STRIDE(cap); dev->dbs = ((void __iomem *)dev->bar) + 4096; + + /* + * Temporary fix for the Apple controller found in the MacBook8,1 and + * some MacBook7,1 to avoid controller resets and data loss. 
+ */ + if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { + dev->q_depth = 2; + dev_warn(dev->dev, "detected Apple NVMe controller, set " + "queue depth=%u to work around controller resets\n", + dev->q_depth); + } + if (readl(&dev->bar->vs) >= NVME_VS(1, 2)) dev->cmb = nvme_map_cmb(dev); @@ -2787,6 +2806,10 @@ static void nvme_del_queue_end(struct nvme_queue *nvmeq) { struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx; nvme_put_dq(dq); + + spin_lock_irq(&nvmeq->q_lock); + nvme_process_cq(nvmeq); + spin_unlock_irq(&nvmeq->q_lock); } static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode, diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c index 540f077c37ea..02a7452bdf23 100644 --- a/drivers/pci/host/pcie-designware.c +++ b/drivers/pci/host/pcie-designware.c @@ -440,7 +440,6 @@ int dw_pcie_host_init(struct pcie_port *pp) ret, pp->io); continue; } - pp->io_base = pp->io->start; break; case IORESOURCE_MEM: pp->mem = win->res; diff --git a/drivers/pci/host/pcie-hisi.c b/drivers/pci/host/pcie-hisi.c index 35457ecd8e70..163671a4f798 100644 --- a/drivers/pci/host/pcie-hisi.c +++ b/drivers/pci/host/pcie-hisi.c @@ -111,7 +111,7 @@ static struct pcie_host_ops hisi_pcie_host_ops = { .link_up = hisi_pcie_link_up, }; -static int __init hisi_add_pcie_port(struct pcie_port *pp, +static int hisi_add_pcie_port(struct pcie_port *pp, struct platform_device *pdev) { int ret; @@ -139,7 +139,7 @@ static int __init hisi_add_pcie_port(struct pcie_port *pp, return 0; } -static int __init hisi_pcie_probe(struct platform_device *pdev) +static int hisi_pcie_probe(struct platform_device *pdev) { struct hisi_pcie *hisi_pcie; struct pcie_port *pp; diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 4446fcb5effd..d7ffd66814bb 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -1146,9 +1146,21 @@ static int pci_pm_runtime_suspend(struct device *dev) pci_dev->state_saved = false; pci_dev->no_d3cold = false; error = pm->runtime_suspend(dev); - suspend_report_result(pm->runtime_suspend, error); - if (error) + if (error) { + /* + * -EBUSY and -EAGAIN is used to request the runtime PM core + * to schedule a new suspend, so log the event only with debug + * log level. 
+ */ + if (error == -EBUSY || error == -EAGAIN) + dev_dbg(dev, "can't suspend now (%pf returned %d)\n", + pm->runtime_suspend, error); + else + dev_err(dev, "can't suspend (%pf returned %d)\n", + pm->runtime_suspend, error); + return error; + } if (!pci_dev->d3cold_allowed) pci_dev->no_d3cold = true; diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 92618686604c..eead54cd01b2 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -216,7 +216,10 @@ static ssize_t numa_node_store(struct device *dev, if (ret) return ret; - if (node >= MAX_NUMNODES || !node_online(node)) + if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES) + return -EINVAL; + + if (node != NUMA_NO_NODE && !node_online(node)) return -EINVAL; add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index fd2f03fa53f3..d390fc1475ec 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -337,6 +337,4 @@ static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe) } #endif -struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus); - #endif /* DRIVERS_PCI_H */ diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 7e327309cf69..c2dd52ea4198 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -3405,7 +3405,9 @@ static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe) return 0; } -#include "../gpu/drm/i915/i915_reg.h" +#define SOUTH_CHICKEN2 0xc2004 +#define PCH_PP_STATUS 0xc7200 +#define PCH_PP_CONTROL 0xc7204 #define MSG_CTL 0x45010 #define NSDE_PWR_STATE 0xd0100 #define IGD_OPERATION_TIMEOUT 10000 /* set timeout 10 seconds */ diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index b422e4ed73f4..312c78b27a32 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -5,8 +5,6 @@ config PINCTRL bool -if PINCTRL - menu "Pin controllers" depends on PINCTRL @@ -274,5 +272,3 @@ config PINCTRL_TB10X select GPIOLIB endmenu - -endif diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c index 88a7fac11bd4..acaf84cadca3 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c +++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c @@ -538,8 +538,10 @@ static int imx1_pinctrl_parse_functions(struct device_node *np, func->groups[i] = child->name; grp = &info->groups[grp_index++]; ret = imx1_pinctrl_parse_groups(child, grp, info, i++); - if (ret == -ENOMEM) + if (ret == -ENOMEM) { + of_node_put(child); return ret; + } } return 0; @@ -582,8 +584,10 @@ static int imx1_pinctrl_parse_dt(struct platform_device *pdev, for_each_child_of_node(np, child) { ret = imx1_pinctrl_parse_functions(child, info, ifunc++); - if (ret == -ENOMEM) + if (ret == -ENOMEM) { + of_node_put(child); return -ENOMEM; + } } return 0; diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c index f307f1d27d64..5c717275a7fa 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c @@ -747,7 +747,7 @@ static int mtk_gpio_get_direction(struct gpio_chip *chip, unsigned offset) reg_addr = mtk_get_port(pctl, offset) + pctl->devdata->dir_offset; bit = BIT(offset & 0xf); regmap_read(pctl->regmap1, reg_addr, &read_val); - return !!(read_val & bit); + return !(read_val & bit); } static int mtk_gpio_get(struct gpio_chip *chip, unsigned offset) @@ -757,12 +757,8 @@ static int mtk_gpio_get(struct gpio_chip *chip, unsigned offset) unsigned int read_val = 0; struct 
mtk_pinctrl *pctl = dev_get_drvdata(chip->dev); - if (mtk_gpio_get_direction(chip, offset)) - reg_addr = mtk_get_port(pctl, offset) + - pctl->devdata->dout_offset; - else - reg_addr = mtk_get_port(pctl, offset) + - pctl->devdata->din_offset; + reg_addr = mtk_get_port(pctl, offset) + + pctl->devdata->din_offset; bit = BIT(offset & 0xf); regmap_read(pctl->regmap1, reg_addr, &read_val); @@ -997,6 +993,7 @@ static struct gpio_chip mtk_gpio_chip = { .owner = THIS_MODULE, .request = gpiochip_generic_request, .free = gpiochip_generic_free, + .get_direction = mtk_gpio_get_direction, .direction_input = mtk_gpio_direction_input, .direction_output = mtk_gpio_direction_output, .get = mtk_gpio_get, diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c index d809c9eaa323..19a3c3bc2f1f 100644 --- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c @@ -672,7 +672,7 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev) return -ENOMEM; pctrl->dev = &pdev->dev; - pctrl->npins = (unsigned)of_device_get_match_data(&pdev->dev); + pctrl->npins = (unsigned long)of_device_get_match_data(&pdev->dev); pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL); if (!pctrl->regmap) { diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c index 8982027de8e8..b868ef1766a0 100644 --- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c +++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c @@ -763,7 +763,7 @@ static int pm8xxx_mpp_probe(struct platform_device *pdev) return -ENOMEM; pctrl->dev = &pdev->dev; - pctrl->npins = (unsigned)of_device_get_match_data(&pdev->dev); + pctrl->npins = (unsigned long)of_device_get_match_data(&pdev->dev); pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL); if (!pctrl->regmap) { diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/sh-pfc/pfc-sh7734.c index e7deb51de7dc..9842bb106796 100644 --- a/drivers/pinctrl/sh-pfc/pfc-sh7734.c +++ b/drivers/pinctrl/sh-pfc/pfc-sh7734.c @@ -31,11 +31,11 @@ PORT_GP_12(5, fn, sfx) #undef _GP_DATA -#define _GP_DATA(bank, pin, name, sfx) \ +#define _GP_DATA(bank, pin, name, sfx, cfg) \ PINMUX_DATA(name##_DATA, name##_FN, name##_IN, name##_OUT) -#define _GP_INOUTSEL(bank, pin, name, sfx) name##_IN, name##_OUT -#define _GP_INDT(bank, pin, name, sfx) name##_DATA +#define _GP_INOUTSEL(bank, pin, name, sfx, cfg) name##_IN, name##_OUT +#define _GP_INDT(bank, pin, name, sfx, cfg) name##_DATA #define GP_INOUTSEL(bank) PORT_GP_32_REV(bank, _GP_INOUTSEL, unused) #define GP_INDT(bank) PORT_GP_32_REV(bank, _GP_INDT, unused) diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 8b3130f22b42..9e03d158f411 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -1478,6 +1478,8 @@ module_init(remoteproc_init); static void __exit remoteproc_exit(void) { + ida_destroy(&rproc_dev_index); + rproc_exit_debugfs(); } module_exit(remoteproc_exit); diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c index 9d30809bb407..916af5096f57 100644 --- a/drivers/remoteproc/remoteproc_debugfs.c +++ b/drivers/remoteproc/remoteproc_debugfs.c @@ -156,7 +156,7 @@ rproc_recovery_write(struct file *filp, const char __user *user_buf, char buf[10]; int ret; - if (count > sizeof(buf)) + if (count < 1 || count > sizeof(buf)) return count; ret = copy_from_user(buf, user_buf, count); diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index 
188006c55ce0..aa705bb4748c 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c @@ -15,9 +15,6 @@ #include <linux/i2c.h> #include <linux/init.h> #include <linux/module.h> -#include <linux/of_device.h> -#include <linux/of_irq.h> -#include <linux/pm_wakeirq.h> #include <linux/rtc/ds1307.h> #include <linux/rtc.h> #include <linux/slab.h> @@ -117,7 +114,6 @@ struct ds1307 { #define HAS_ALARM 1 /* bit 1 == irq claimed */ struct i2c_client *client; struct rtc_device *rtc; - int wakeirq; s32 (*read_block_data)(const struct i2c_client *client, u8 command, u8 length, u8 *values); s32 (*write_block_data)(const struct i2c_client *client, u8 command, @@ -1138,7 +1134,10 @@ read_rtc: bin2bcd(tmp)); } - device_set_wakeup_capable(&client->dev, want_irq); + if (want_irq) { + device_set_wakeup_capable(&client->dev, true); + set_bit(HAS_ALARM, &ds1307->flags); + } ds1307->rtc = devm_rtc_device_register(&client->dev, client->name, rtc_ops, THIS_MODULE); if (IS_ERR(ds1307->rtc)) { @@ -1146,43 +1145,19 @@ read_rtc: } if (want_irq) { - struct device_node *node = client->dev.of_node; - err = devm_request_threaded_irq(&client->dev, client->irq, NULL, irq_handler, IRQF_SHARED | IRQF_ONESHOT, ds1307->rtc->name, client); if (err) { client->irq = 0; + device_set_wakeup_capable(&client->dev, false); + clear_bit(HAS_ALARM, &ds1307->flags); dev_err(&client->dev, "unable to request IRQ!\n"); - goto no_irq; - } - - set_bit(HAS_ALARM, &ds1307->flags); - dev_dbg(&client->dev, "got IRQ %d\n", client->irq); - - /* Currently supported by OF code only! */ - if (!node) - goto no_irq; - - err = of_irq_get(node, 1); - if (err <= 0) { - if (err == -EPROBE_DEFER) - goto exit; - goto no_irq; - } - ds1307->wakeirq = err; - - err = dev_pm_set_dedicated_wake_irq(&client->dev, - ds1307->wakeirq); - if (err) { - dev_err(&client->dev, "unable to setup wakeIRQ %d!\n", - err); - goto exit; - } + } else + dev_dbg(&client->dev, "got IRQ %d\n", client->irq); } -no_irq: if (chip->nvram_size) { ds1307->nvram = devm_kzalloc(&client->dev, @@ -1226,9 +1201,6 @@ static int ds1307_remove(struct i2c_client *client) { struct ds1307 *ds1307 = i2c_get_clientdata(client); - if (ds1307->wakeirq) - dev_pm_clear_wake_irq(&client->dev); - if (test_and_clear_bit(HAS_NVRAM, &ds1307->flags)) sysfs_remove_bin_file(&client->dev.kobj, ds1307->nvram); diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 5f692ae40749..64eed87d34a8 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -364,6 +364,7 @@ config SCSI_HPSA tristate "HP Smart Array SCSI driver" depends on PCI && SCSI select CHECK_SIGNATURE + select SCSI_SAS_ATTRS help This driver supports HP Smart Array Controllers (circa 2009). It is a SCSI alternative to the cciss driver, which is a block @@ -499,6 +500,7 @@ config SCSI_ADVANSYS tristate "AdvanSys SCSI support" depends on SCSI depends on ISA || EISA || PCI + depends on ISA_DMA_API || !ISA help This is a driver for all SCSI host adapters manufactured by AdvanSys. 
It is documented in the kernel source in diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c index 519f9a4b3dad..febbd83e2ecd 100644 --- a/drivers/scsi/advansys.c +++ b/drivers/scsi/advansys.c @@ -7803,7 +7803,7 @@ adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp, return ASC_BUSY; } scsiqp->sense_addr = cpu_to_le32(sense_addr); - scsiqp->sense_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE); + scsiqp->sense_len = SCSI_SENSE_BUFFERSIZE; /* Build ADV_SCSI_REQ_Q */ diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 323982fd00c3..82ac1cd818ac 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -333,6 +333,17 @@ static void scsi_host_dev_release(struct device *dev) kfree(queuedata); } + if (shost->shost_state == SHOST_CREATED) { + /* + * Free the shost_dev device name here if scsi_host_alloc() + * and scsi_host_put() have been called but neither + * scsi_host_add() nor scsi_host_remove() has been called. + * This avoids that the memory allocated for the shost_dev + * name is leaked. + */ + kfree(dev_name(&shost->shost_dev)); + } + scsi_destroy_command_freelist(shost); if (shost_use_blk_mq(shost)) { if (shost->tag_set.tags) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 6a8f95808ee0..a3860367b568 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -8671,7 +8671,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h) if ((rc != 0) || (c->err_info->CommandStatus != 0)) goto errout; - if (*options && HPSA_DIAG_OPTS_DISABLE_RLD_CACHING) + if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING) goto out; errout: diff --git a/drivers/scsi/mpt3sas/Kconfig b/drivers/scsi/mpt3sas/Kconfig index 29061467cc17..b736dbc80485 100644 --- a/drivers/scsi/mpt3sas/Kconfig +++ b/drivers/scsi/mpt3sas/Kconfig @@ -71,3 +71,12 @@ config SCSI_MPT3SAS_MAX_SGE MAX_PHYS_SEGMENTS in most kernels. However in SuSE kernels this can be 256. However, it may decreased down to 16. Decreasing this parameter will reduce memory requirements on a per controller instance. + +config SCSI_MPT2SAS + tristate "Legacy MPT2SAS config option" + default n + select SCSI_MPT3SAS + depends on PCI && SCSI + ---help--- + Dummy config option for backwards compatiblity: configure the MPT3SAS + driver instead. diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index d95206b7e116..9ab77b06434d 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -3905,8 +3905,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) * We do not expose raid functionality to upper layer for warpdrive. 
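The hpsa_disable_rld_caching() hunk above fixes a classic operator slip: with the logical &&, the condition reads "*options is nonzero and the constant is nonzero", which holds for any nonzero *options; only the bitwise & actually tests the HPSA_DIAG_OPTS_DISABLE_RLD_CACHING bit the command was meant to verify. A standalone illustration (the bit value here is made up):

#include <stdio.h>

#define DISABLE_RLD_CACHING 0x4 /* illustrative bit, not the real define */

int main(void)
{
    unsigned int options = 0x1; /* some unrelated bit is set */

    printf("&&: %d\n", options && DISABLE_RLD_CACHING);       /* 1, false positive */
    printf("& : %d\n", (options & DISABLE_RLD_CACHING) != 0); /* 0, bit not set */
    return 0;
}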
*/ if (!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev) - && (sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) && - scmd->cmd_len != 32) + && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32) mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON; smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd); diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 90fdf0e859e3..675e7fab0796 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -758,7 +758,7 @@ mvs_store_interrupt_coalescing(struct device *cdev, struct device_attribute *attr, const char *buffer, size_t size) { - int val = 0; + unsigned int val = 0; struct mvs_info *mvi = NULL; struct Scsi_Host *shost = class_to_shost(cdev); struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); @@ -766,7 +766,7 @@ mvs_store_interrupt_coalescing(struct device *cdev, if (buffer == NULL) return size; - if (sscanf(buffer, "%d", &val) != 1) + if (sscanf(buffer, "%u", &val) != 1) return -EINVAL; if (val >= 0x10000) { diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index eb0cc5475c45..b6b4cfdd7620 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -433,7 +433,7 @@ qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in, if (off_in < QLA82XX_PCI_CRBSPACE) return -1; - *off_out = (void __iomem *)(off_in - QLA82XX_PCI_CRBSPACE); + off_in -= QLA82XX_PCI_CRBSPACE; /* Try direct map */ m = &crb_128M_2M_map[CRB_BLK(off_in)].sub_block[CRB_SUBBLK(off_in)]; @@ -443,6 +443,7 @@ qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in, return 0; } /* Not in direct map, use crb window */ + *off_out = (void __iomem *)off_in; return 1; } diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 3ba2e9564b9a..81af294f15a7 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -902,7 +902,7 @@ static ssize_t tcm_qla2xxx_tpg_fabric_prot_type_show(struct config_item *item, return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type); } -CONFIGFS_ATTR_WO(tcm_qla2xxx_tpg_, enable); +CONFIGFS_ATTR(tcm_qla2xxx_tpg_, enable); CONFIGFS_ATTR_RO(tcm_qla2xxx_tpg_, dynamic_sessions); CONFIGFS_ATTR(tcm_qla2xxx_tpg_, fabric_prot_type); diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index dfcc45bb03b1..d09d60293c27 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -465,8 +465,9 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = { 0} }, {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* VERIFY */ - {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {0, 0x2f, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, NULL, NULL, /* VERIFY(10) */ + {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, + 0, 0, 0, 0, 0, 0} }, {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0, vl_iarr, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */ @@ -477,8 +478,8 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = { {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, /* 20 */ - {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ALLOW REMOVAL */ - {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */ + {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {0, 
0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */ {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */ diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 83245391e956..054923e3393c 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -701,9 +701,12 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result, * strings. */ if (sdev->inquiry_len < 36) { - sdev_printk(KERN_INFO, sdev, - "scsi scan: INQUIRY result too short (%d)," - " using 36\n", sdev->inquiry_len); + if (!sdev->host->short_inquiry) { + shost_printk(KERN_INFO, sdev->host, + "scsi scan: INQUIRY result too short (%d)," + " using 36\n", sdev->inquiry_len); + sdev->host->short_inquiry = 1; + } sdev->inquiry_len = 36; } diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 8d2312239ae0..21930c9ac9cd 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -1102,6 +1102,14 @@ void __scsi_remove_device(struct scsi_device *sdev) { struct device *dev = &sdev->sdev_gendev; + /* + * This cleanup path is not reentrant and while it is impossible + * to get a new reference with scsi_device_get() someone can still + * hold a previously acquired one. + */ + if (sdev->sdev_state == SDEV_DEL) + return; + if (sdev->is_visible) { if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0) return; @@ -1110,7 +1118,9 @@ void __scsi_remove_device(struct scsi_device *sdev) device_unregister(&sdev->sdev_dev); transport_remove_device(dev); scsi_dh_remove_device(sdev); - } + device_del(dev); + } else + put_device(&sdev->sdev_dev); /* * Stop accepting new requests and wait until all queuecommand() and @@ -1121,16 +1131,6 @@ void __scsi_remove_device(struct scsi_device *sdev) blk_cleanup_queue(sdev->request_queue); cancel_work_sync(&sdev->requeue_work); - /* - * Remove the device after blk_cleanup_queue() has been called such - * a possible bdi_register() call with the same name occurs after - * blk_cleanup_queue() has called bdi_destroy(). - */ - if (sdev->is_visible) - device_del(dev); - else - put_device(&sdev->sdev_dev); - if (sdev->host->hostt->slave_destroy) sdev->host->hostt->slave_destroy(sdev); transport_destroy_device(dev); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 54519804c46a..3d22fc3e3c1a 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -638,11 +638,24 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) unsigned int max_blocks = 0; q->limits.discard_zeroes_data = 0; - q->limits.discard_alignment = sdkp->unmap_alignment * - logical_block_size; - q->limits.discard_granularity = - max(sdkp->physical_block_size, - sdkp->unmap_granularity * logical_block_size); + + /* + * When LBPRZ is reported, discard alignment and granularity + * must be fixed to the logical block size. Otherwise the block + * layer will drop misaligned portions of the request which can + * lead to data corruption. If LBPRZ is not set, we honor the + * device preference. 
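In the sd_config_discard() hunk, a device reporting LBPRZ guarantees that discarded blocks read back as zeroes, and upper layers may depend on that; if the block layer then rounds a discard to a coarser alignment or granularity and drops the misaligned head or tail, stale data survives where zeroes were promised. Hence alignment 0 and the minimal granularity in the LBPRZ branch, while the non-LBPRZ branch keeps the device-preferred geometry. A small model of the non-LBPRZ arithmetic with illustrative VPD values:

#include <stdio.h>

int main(void)
{
    unsigned int logical_block_size = 512;
    unsigned int physical_block_size = 4096;
    unsigned int unmap_granularity = 8; /* logical blocks, illustrative */
    unsigned int unmap_alignment = 8;   /* logical blocks, illustrative */

    unsigned int bytes = unmap_granularity * logical_block_size;
    unsigned int granularity = physical_block_size > bytes ?
                               physical_block_size : bytes;

    printf("granularity=%u alignment=%u\n",
           granularity, unmap_alignment * logical_block_size); /* 4096 4096 */
    return 0;
}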
+ */ + if (sdkp->lbprz) { + q->limits.discard_alignment = 0; + q->limits.discard_granularity = 1; + } else { + q->limits.discard_alignment = sdkp->unmap_alignment * + logical_block_size; + q->limits.discard_granularity = + max(sdkp->physical_block_size, + sdkp->unmap_granularity * logical_block_size); + } sdkp->provisioning_mode = mode; @@ -2321,11 +2334,8 @@ got_data: } } - if (sdkp->capacity > 0xffffffff) { + if (sdkp->capacity > 0xffffffff) sdp->use_16_for_rw = 1; - sdkp->max_xfer_blocks = SD_MAX_XFER_BLOCKS; - } else - sdkp->max_xfer_blocks = SD_DEF_XFER_BLOCKS; /* Rescale capacity to 512-byte units */ if (sector_size == 4096) @@ -2642,7 +2652,6 @@ static void sd_read_block_limits(struct scsi_disk *sdkp) { unsigned int sector_sz = sdkp->device->sector_size; const int vpd_len = 64; - u32 max_xfer_length; unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL); if (!buffer || @@ -2650,14 +2659,11 @@ static void sd_read_block_limits(struct scsi_disk *sdkp) scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len)) goto out; - max_xfer_length = get_unaligned_be32(&buffer[8]); - if (max_xfer_length) - sdkp->max_xfer_blocks = max_xfer_length; - blk_queue_io_min(sdkp->disk->queue, get_unaligned_be16(&buffer[6]) * sector_sz); - blk_queue_io_opt(sdkp->disk->queue, - get_unaligned_be32(&buffer[12]) * sector_sz); + + sdkp->max_xfer_blocks = get_unaligned_be32(&buffer[8]); + sdkp->opt_xfer_blocks = get_unaligned_be32(&buffer[12]); if (buffer[3] == 0x3c) { unsigned int lba_count, desc_count; @@ -2806,6 +2812,11 @@ static int sd_try_extended_inquiry(struct scsi_device *sdp) return 0; } +static inline u32 logical_to_sectors(struct scsi_device *sdev, u32 blocks) +{ + return blocks << (ilog2(sdev->sector_size) - 9); +} + /** * sd_revalidate_disk - called the first time a new disk is seen, * performs disk spin up, read_capacity, etc. @@ -2815,8 +2826,9 @@ static int sd_revalidate_disk(struct gendisk *disk) { struct scsi_disk *sdkp = scsi_disk(disk); struct scsi_device *sdp = sdkp->device; + struct request_queue *q = sdkp->disk->queue; unsigned char *buffer; - unsigned int max_xfer; + unsigned int dev_max, rw_max; SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_revalidate_disk\n")); @@ -2864,11 +2876,26 @@ static int sd_revalidate_disk(struct gendisk *disk) */ sd_set_flush_flag(sdkp); - max_xfer = sdkp->max_xfer_blocks; - max_xfer <<= ilog2(sdp->sector_size) - 9; + /* Initial block count limit based on CDB TRANSFER LENGTH field size. */ + dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS; + + /* Some devices report a maximum block count for READ/WRITE requests. */ + dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks); + q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max); + + /* + * Use the device's preferred I/O size for reads and writes + * unless the reported value is unreasonably large (or garbage). 
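The sd.c rework above keeps transfer limits in logical blocks for as long as possible: dev_max comes from the CDB width or the device's reported maximum, opt_xfer_blocks from the Block Limits VPD page, and only when programming the queue are they converted to 512-byte sectors; rw_max becomes the device's preferred I/O size when that looks sane, else BLK_DEF_MAX_SECTORS, and the final max_sectors is capped by the controller limit. The conversion helper is a plain shift; a userspace check of it:

#include <stdint.h>
#include <stdio.h>

/* userspace stand-in for the kernel's ilog2() on exact powers of two */
static unsigned int ilog2_u32(uint32_t v)
{
    return 31 - (unsigned int)__builtin_clz(v);
}

static uint32_t logical_to_sectors(uint32_t sector_size, uint32_t blocks)
{
    return blocks << (ilog2_u32(sector_size) - 9);
}

int main(void)
{
    /* 2048 logical blocks of 4096 bytes = 16384 sectors of 512 bytes */
    printf("%u\n", logical_to_sectors(4096, 2048));
    return 0;
}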
+ */ + if (sdkp->opt_xfer_blocks && sdkp->opt_xfer_blocks <= dev_max && + sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS) + rw_max = q->limits.io_opt = + logical_to_sectors(sdp, sdkp->opt_xfer_blocks); + else + rw_max = BLK_DEF_MAX_SECTORS; - sdkp->disk->queue->limits.max_sectors = - min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer); + /* Combine with controller limits */ + q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q)); set_capacity(disk, sdkp->capacity); sd_config_write_same(sdkp); diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 63ba5ca7f9a1..5f2a84aff29f 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -67,6 +67,7 @@ struct scsi_disk { atomic_t openers; sector_t capacity; /* size in 512-byte sectors */ u32 max_xfer_blocks; + u32 opt_xfer_blocks; u32 max_ws_blocks; u32 max_unmap_blocks; u32 unmap_granularity; diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index e0a1e52a04e7..2e522951b619 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -4083,6 +4083,7 @@ static int create_one_cdev(struct scsi_tape *tape, int mode, int rew) } cdev->owner = THIS_MODULE; cdev->ops = &st_fops; + STm->cdevs[rew] = cdev; error = cdev_add(cdev, cdev_devno, 1); if (error) { @@ -4091,7 +4092,6 @@ static int create_one_cdev(struct scsi_tape *tape, int mode, int rew) pr_err("st%d: Device not attached.\n", dev_num); goto out_free; } - STm->cdevs[rew] = cdev; i = mode << (4 - ST_NBR_MODE_BITS); snprintf(name, 10, "%s%s%s", rew ? "n" : "", @@ -4110,8 +4110,9 @@ static int create_one_cdev(struct scsi_tape *tape, int mode, int rew) return 0; out_free: cdev_del(STm->cdevs[rew]); - STm->cdevs[rew] = NULL; out: + STm->cdevs[rew] = NULL; + STm->devs[rew] = NULL; return error; } diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig index 9d5068248aa0..0a4ea809a61b 100644 --- a/drivers/soc/mediatek/Kconfig +++ b/drivers/soc/mediatek/Kconfig @@ -23,6 +23,7 @@ config MTK_PMIC_WRAP config MTK_SCPSYS bool "MediaTek SCPSYS Support" depends on ARCH_MEDIATEK || COMPILE_TEST + default ARM64 && ARCH_MEDIATEK select REGMAP select MTK_INFRACFG select PM_GENERIC_DOMAINS if PM diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c index f3a0b6a4b54e..8c03a80b482d 100644 --- a/drivers/soc/ti/knav_qmss_queue.c +++ b/drivers/soc/ti/knav_qmss_queue.c @@ -1179,7 +1179,7 @@ static int knav_queue_setup_link_ram(struct knav_device *kdev) block++; if (!block->size) - return 0; + continue; dev_dbg(kdev->dev, "linkram1: phys:%x, virt:%p, size:%x\n", block->phys, block->virt, block->size); @@ -1519,9 +1519,9 @@ static int knav_queue_load_pdsp(struct knav_device *kdev, for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) { if (knav_acc_firmwares[i]) { - ret = request_firmware(&fw, - knav_acc_firmwares[i], - kdev->dev); + ret = request_firmware_direct(&fw, + knav_acc_firmwares[i], + kdev->dev); if (!ret) { found = true; break; diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c index 06858e04ec59..bf9a610e5b89 100644 --- a/drivers/spi/spi-bcm63xx.c +++ b/drivers/spi/spi-bcm63xx.c @@ -562,8 +562,8 @@ static int bcm63xx_spi_probe(struct platform_device *pdev) goto out_clk_disable; } - dev_info(dev, "at 0x%08x (irq %d, FIFOs size %d)\n", - r->start, irq, bs->fifo_size); + dev_info(dev, "at %pr (irq %d, FIFOs size %d)\n", + r, irq, bs->fifo_size); return 0; diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index 563954a61424..7840067062a8 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c @@ -410,7 +410,7 @@ 
static int mtk_spi_setup(struct spi_device *spi) if (!spi->controller_data) spi->controller_data = (void *)&mtk_default_chip_info; - if (mdata->dev_comp->need_pad_sel) + if (mdata->dev_comp->need_pad_sel && gpio_is_valid(spi->cs_gpio)) gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); return 0; @@ -632,13 +632,23 @@ static int mtk_spi_probe(struct platform_device *pdev) goto err_put_master; } - for (i = 0; i < master->num_chipselect; i++) { - ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i], - dev_name(&pdev->dev)); - if (ret) { - dev_err(&pdev->dev, - "can't get CS GPIO %i\n", i); - goto err_put_master; + if (!master->cs_gpios && master->num_chipselect > 1) { + dev_err(&pdev->dev, + "cs_gpios not specified and num_chipselect > 1\n"); + ret = -EINVAL; + goto err_put_master; + } + + if (master->cs_gpios) { + for (i = 0; i < master->num_chipselect; i++) { + ret = devm_gpio_request(&pdev->dev, + master->cs_gpios[i], + dev_name(&pdev->dev)); + if (ret) { + dev_err(&pdev->dev, + "can't get CS GPIO %i\n", i); + goto err_put_master; + } } } } diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index 94af80676684..5e5fd77e2711 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c @@ -1171,19 +1171,31 @@ err_no_rxchan: static int pl022_dma_autoprobe(struct pl022 *pl022) { struct device *dev = &pl022->adev->dev; + struct dma_chan *chan; + int err; /* automatically configure DMA channels from platform, normally using DT */ - pl022->dma_rx_channel = dma_request_slave_channel(dev, "rx"); - if (!pl022->dma_rx_channel) + chan = dma_request_slave_channel_reason(dev, "rx"); + if (IS_ERR(chan)) { + err = PTR_ERR(chan); goto err_no_rxchan; + } + + pl022->dma_rx_channel = chan; - pl022->dma_tx_channel = dma_request_slave_channel(dev, "tx"); - if (!pl022->dma_tx_channel) + chan = dma_request_slave_channel_reason(dev, "tx"); + if (IS_ERR(chan)) { + err = PTR_ERR(chan); goto err_no_txchan; + } + + pl022->dma_tx_channel = chan; pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL); - if (!pl022->dummypage) + if (!pl022->dummypage) { + err = -ENOMEM; goto err_no_dummypage; + } return 0; @@ -1194,7 +1206,7 @@ err_no_txchan: dma_release_channel(pl022->dma_rx_channel); pl022->dma_rx_channel = NULL; err_no_rxchan: - return -ENODEV; + return err; } static void terminate_dma(struct pl022 *pl022) @@ -2236,6 +2248,10 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id) /* Get DMA channels, try autoconfiguration first */ status = pl022_dma_autoprobe(pl022); + if (status == -EPROBE_DEFER) { + dev_dbg(dev, "deferring probe to get DMA channel\n"); + goto err_no_irq; + } /* If that failed, use channels from platform_info */ if (status == 0) diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index e2415be209d5..2b0a8ec3affb 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -376,6 +376,7 @@ static void spi_drv_shutdown(struct device *dev) /** * __spi_register_driver - register a SPI driver + * @owner: owner module of the driver to register * @sdrv: the driver to register * Context: can sleep * @@ -2130,6 +2131,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) * Set transfer tx_nbits and rx_nbits as single transfer default * (SPI_NBITS_SINGLE) if it is not set for this transfer. 
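The pl022_dma_autoprobe() hunk above replaces dma_request_slave_channel(), which can only report failure as NULL, with the ERR_PTR-based dma_request_slave_channel_reason(), so the underlying error survives; the probe then singles out -EPROBE_DEFER and retries later instead of silently falling back to platform data. The pattern reduced to one channel, with foo standing in for the driver state:

struct dma_chan *chan;

chan = dma_request_slave_channel_reason(dev, "rx");
if (IS_ERR(chan))
    return PTR_ERR(chan); /* may be -EPROBE_DEFER: probe is retried later */
foo->dma_rx_channel = chan; /* hypothetical driver field */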
*/ + message->frame_length = 0; list_for_each_entry(xfer, &message->transfers, transfer_list) { message->frame_length += xfer->len; if (!xfer->bits_per_word) diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h index f5d741f25ffd..485ab2670918 100644 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h +++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h @@ -110,7 +110,6 @@ struct libcfs_ioctl_handler { #define IOC_LIBCFS_CLEAR_DEBUG _IOWR('e', 31, long) #define IOC_LIBCFS_MARK_DEBUG _IOWR('e', 32, long) #define IOC_LIBCFS_MEMHOG _IOWR('e', 36, long) -#define IOC_LIBCFS_PING_TEST _IOWR('e', 37, long) /* lnet ioctls */ #define IOC_LIBCFS_GET_NI _IOWR('e', 50, long) #define IOC_LIBCFS_FAIL_NID _IOWR('e', 51, long) diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c index 07a68594c279..e7c2b26156b9 100644 --- a/drivers/staging/lustre/lustre/libcfs/module.c +++ b/drivers/staging/lustre/lustre/libcfs/module.c @@ -274,23 +274,6 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile, unsigned long cmd, } break; - case IOC_LIBCFS_PING_TEST: { - extern void (kping_client)(struct libcfs_ioctl_data *); - void (*ping)(struct libcfs_ioctl_data *); - - CDEBUG(D_IOCTL, "doing %d pings to nid %s (%s)\n", - data->ioc_count, libcfs_nid2str(data->ioc_nid), - libcfs_nid2str(data->ioc_nid)); - ping = symbol_get(kping_client); - if (!ping) - CERROR("symbol_get failed\n"); - else { - ping(data); - symbol_put(kping_client); - } - return 0; - } - default: { struct libcfs_ioctl_handler *hand; diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 342a07c58d89..72204fbf2bb1 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -4074,6 +4074,17 @@ reject: return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); } +static bool iscsi_target_check_conn_state(struct iscsi_conn *conn) +{ + bool ret; + + spin_lock_bh(&conn->state_lock); + ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN); + spin_unlock_bh(&conn->state_lock); + + return ret; +} + int iscsi_target_rx_thread(void *arg) { int ret, rc; @@ -4091,7 +4102,7 @@ int iscsi_target_rx_thread(void *arg) * incoming iscsi/tcp socket I/O, and/or failing the connection. 
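The two iscsi_target hunks here work as a pair. Judging by the surrounding code, the login-failure path could not rely on send_sig() alone: if the rx thread had not yet reached its allow_signal() call, the SIGINT was simply ignored and kthread_stop() would wait forever on a thread parked in wait_for_completion_interruptible(&conn->rx_login_comp). Completing rx_login_comp guarantees the wake-up, and the new iscsi_target_check_conn_state() check makes the woken thread return instead of starting rx processing for a connection that never reached TARG_CONN_STATE_LOGGED_IN. The stop-side ordering, annotated:

if (conn->rx_thread && conn->rx_thread_active) {
    send_sig(SIGINT, conn->rx_thread, 1); /* may be ignored if not yet allowed */
    complete(&conn->rx_login_comp);       /* unblocks the login wait for sure */
    kthread_stop(conn->rx_thread);        /* thread is now guaranteed to exit */
}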
*/ rc = wait_for_completion_interruptible(&conn->rx_login_comp); - if (rc < 0) + if (rc < 0 || iscsi_target_check_conn_state(conn)) return 0; if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 5c964c09c89f..9fc9117d0f22 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -388,6 +388,7 @@ err: if (login->login_complete) { if (conn->rx_thread && conn->rx_thread_active) { send_sig(SIGINT, conn->rx_thread, 1); + complete(&conn->rx_login_comp); kthread_stop(conn->rx_thread); } if (conn->tx_thread && conn->tx_thread_active) { diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index 51d1734d5390..2cbea2af7cd0 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c @@ -208,7 +208,7 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr) if (!pl) { pr_err("Unable to allocate memory for" " struct iscsi_param_list.\n"); - return -1 ; + return -ENOMEM; } INIT_LIST_HEAD(&pl->param_list); INIT_LIST_HEAD(&pl->extra_response_list); @@ -578,7 +578,7 @@ int iscsi_copy_param_list( param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL); if (!param_list) { pr_err("Unable to allocate memory for struct iscsi_param_list.\n"); - return -1; + return -ENOMEM; } INIT_LIST_HEAD(¶m_list->param_list); INIT_LIST_HEAD(¶m_list->extra_response_list); @@ -629,7 +629,7 @@ int iscsi_copy_param_list( err_out: iscsi_release_param_list(param_list); - return -1; + return -ENOMEM; } static void iscsi_release_extra_responses(struct iscsi_param_list *param_list) @@ -729,7 +729,7 @@ static int iscsi_add_notunderstood_response( if (!extra_response) { pr_err("Unable to allocate memory for" " struct iscsi_extra_response.\n"); - return -1; + return -ENOMEM; } INIT_LIST_HEAD(&extra_response->er_list); @@ -1370,7 +1370,7 @@ int iscsi_decode_text_input( tmpbuf = kzalloc(length + 1, GFP_KERNEL); if (!tmpbuf) { pr_err("Unable to allocate %u + 1 bytes for tmpbuf.\n", length); - return -1; + return -ENOMEM; } memcpy(tmpbuf, textbuf, length); diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 0b4b2a67d9f9..98698d875742 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -371,7 +371,8 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o return 0; } -static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success) +static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success, + int *post_ret) { unsigned char *buf, *addr; struct scatterlist *sg; @@ -437,7 +438,8 @@ sbc_execute_rw(struct se_cmd *cmd) cmd->data_direction); } -static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success) +static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success, + int *post_ret) { struct se_device *dev = cmd->se_dev; @@ -447,8 +449,10 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success) * sent to the backend driver. 
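The target_core hunks extend transport_complete_callback to (cmd, success, int *post_ret): rather than the caller re-deriving from SCF_* flags whether the callback already queued its own response, the callback reports that directly and target_complete_ok_work() keys off post_ret. A sketch of the contract; foo_complete() is hypothetical:

typedef sense_reason_t (*complete_cb_t)(struct se_cmd *cmd, bool success,
                                        int *post_ret);

/* hypothetical callback that queues its own response on success */
static sense_reason_t foo_complete(struct se_cmd *cmd, bool success,
                                   int *post_ret)
{
    if (success)
        *post_ret = 1; /* caller returns instead of queueing a response */
    return TCM_NO_SENSE;
}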
*/ spin_lock_irq(&cmd->t_state_lock); - if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) + if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) { cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; + *post_ret = 1; + } spin_unlock_irq(&cmd->t_state_lock); /* @@ -460,7 +464,8 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success) return TCM_NO_SENSE; } -static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success) +static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success, + int *post_ret) { struct se_device *dev = cmd->se_dev; struct scatterlist *write_sg = NULL, *sg; @@ -556,11 +561,11 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes if (block_size < PAGE_SIZE) { sg_set_page(&write_sg[i], m.page, block_size, - block_size); + m.piter.sg->offset + block_size); } else { sg_miter_next(&m); sg_set_page(&write_sg[i], m.page, block_size, - 0); + m.piter.sg->offset); } len -= block_size; i++; diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c index 273c72b2b83d..81a6b3e07687 100644 --- a/drivers/target/target_core_stat.c +++ b/drivers/target/target_core_stat.c @@ -246,7 +246,7 @@ static ssize_t target_stat_lu_prod_show(struct config_item *item, char *page) char str[sizeof(dev->t10_wwn.model)+1]; /* scsiLuProductId */ - for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++) + for (i = 0; i < sizeof(dev->t10_wwn.model); i++) str[i] = ISPRINT(dev->t10_wwn.model[i]) ? dev->t10_wwn.model[i] : ' '; str[i] = '\0'; diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 5b2820312310..28fb3016370f 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -130,6 +130,9 @@ void core_tmr_abort_task( if (tmr->ref_task_tag != ref_tag) continue; + if (!kref_get_unless_zero(&se_cmd->cmd_kref)) + continue; + printk("ABORT_TASK: Found referenced %s task_tag: %llu\n", se_cmd->se_tfo->get_fabric_name(), ref_tag); @@ -139,13 +142,15 @@ void core_tmr_abort_task( " skipping\n", ref_tag); spin_unlock(&se_cmd->t_state_lock); spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + + target_put_sess_cmd(se_cmd); + goto out; } se_cmd->transport_state |= CMD_T_ABORTED; spin_unlock(&se_cmd->t_state_lock); list_del_init(&se_cmd->se_cmd_list); - kref_get(&se_cmd->cmd_kref); spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); cancel_work_sync(&se_cmd->work); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 5bacc7b5ed6d..4fdcee2006d1 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1658,7 +1658,7 @@ bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags) void transport_generic_request_failure(struct se_cmd *cmd, sense_reason_t sense_reason) { - int ret = 0; + int ret = 0, post_ret = 0; pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx" " CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]); @@ -1680,7 +1680,7 @@ void transport_generic_request_failure(struct se_cmd *cmd, */ if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && cmd->transport_complete_callback) - cmd->transport_complete_callback(cmd, false); + cmd->transport_complete_callback(cmd, false, &post_ret); switch (sense_reason) { case TCM_NON_EXISTENT_LUN: @@ -2068,11 +2068,13 @@ static void target_complete_ok_work(struct work_struct *work) */ if (cmd->transport_complete_callback) { sense_reason_t rc; + bool caw = (cmd->se_cmd_flags & 
SCF_COMPARE_AND_WRITE); + bool zero_dl = !(cmd->data_length); + int post_ret = 0; - rc = cmd->transport_complete_callback(cmd, true); - if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) { - if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && - !cmd->data_length) + rc = cmd->transport_complete_callback(cmd, true, &post_ret); + if (!rc && !post_ret) { + if (caw && zero_dl) goto queue_rsp; return; @@ -2507,23 +2509,24 @@ out: EXPORT_SYMBOL(target_get_sess_cmd); static void target_release_cmd_kref(struct kref *kref) - __releases(&se_cmd->se_sess->sess_cmd_lock) { struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); struct se_session *se_sess = se_cmd->se_sess; + unsigned long flags; + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); if (list_empty(&se_cmd->se_cmd_list)) { - spin_unlock(&se_sess->sess_cmd_lock); + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); se_cmd->se_tfo->release_cmd(se_cmd); return; } if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { - spin_unlock(&se_sess->sess_cmd_lock); + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); complete(&se_cmd->cmd_wait_comp); return; } list_del(&se_cmd->se_cmd_list); - spin_unlock(&se_sess->sess_cmd_lock); + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); se_cmd->se_tfo->release_cmd(se_cmd); } @@ -2539,8 +2542,7 @@ int target_put_sess_cmd(struct se_cmd *se_cmd) se_cmd->se_tfo->release_cmd(se_cmd); return 1; } - return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref, - &se_sess->sess_cmd_lock); + return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); } EXPORT_SYMBOL(target_put_sess_cmd); diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 937cebf76633..5e6d6cb348fc 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -638,7 +638,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data) if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) return 0; - if (!time_after(cmd->deadline, jiffies)) + if (!time_after(jiffies, cmd->deadline)) return 0; set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); @@ -1101,8 +1101,6 @@ tcmu_parse_cdb(struct se_cmd *cmd) static const struct target_backend_ops tcmu_ops = { .name = "user", - .inquiry_prod = "USER", - .inquiry_rev = TCMU_VERSION, .owner = THIS_MODULE, .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, .attach_hba = tcmu_attach_hba, diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index c463c89b90ef..8cc4ac64a91c 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -382,7 +382,7 @@ endmenu config QCOM_SPMI_TEMP_ALARM tristate "Qualcomm SPMI PMIC Temperature Alarm" - depends on OF && (SPMI || COMPILE_TEST) && IIO + depends on OF && SPMI && IIO select REGMAP_SPMI help This enables a thermal sysfs driver for Qualcomm plug-and-play (QPNP) diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index c8fe3cac2e0e..c5547bd711db 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c @@ -55,6 +55,7 @@ #define TEMPSENSE2_PANIC_VALUE_SHIFT 16 #define TEMPSENSE2_PANIC_VALUE_MASK 0xfff0000 +#define OCOTP_MEM0 0x0480 #define OCOTP_ANA1 0x04e0 /* The driver supports 1 passive trip point and 1 critical trip point */ @@ -64,12 +65,6 @@ enum imx_thermal_trip { IMX_TRIP_NUM, }; -/* - * It defines the temperature in millicelsius for passive trip point - * that will trigger cooling action when crossed. 
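The tcmu_check_expired_cmd() hunk fixes inverted time_after() arguments: time_after(a, b) is true when a is later than b, so "not yet expired" must be !time_after(jiffies, cmd->deadline); the old order skipped exactly the commands that had already expired. A standalone demonstration using the kernel's wrap-safe definition:

#include <stdio.h>

/* same shape as the kernel's definition, wrap-safe for unsigned counters */
#define time_after(a, b) ((long)((b) - (a)) < 0)

int main(void)
{
    unsigned long jiffies = 1000, deadline = 900; /* deadline already passed */

    /* old test: treats the expired command as still live (prints 1) */
    printf("old skip: %d\n", !time_after(deadline, jiffies));
    /* new test: falls through to expiry handling (prints 0) */
    printf("new skip: %d\n", !time_after(jiffies, deadline));
    return 0;
}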
- */ -#define IMX_TEMP_PASSIVE 85000 - #define IMX_POLLING_DELAY 2000 /* millisecond */ #define IMX_PASSIVE_DELAY 1000 @@ -100,12 +95,14 @@ struct imx_thermal_data { u32 c1, c2; /* See formula in imx_get_sensor_data() */ int temp_passive; int temp_critical; + int temp_max; int alarm_temp; int last_temp; bool irq_enabled; int irq; struct clk *thermal_clk; const struct thermal_soc_data *socdata; + const char *temp_grade; }; static void imx_set_panic_temp(struct imx_thermal_data *data, @@ -285,10 +282,12 @@ static int imx_set_trip_temp(struct thermal_zone_device *tz, int trip, { struct imx_thermal_data *data = tz->devdata; + /* do not allow changing critical threshold */ if (trip == IMX_TRIP_CRITICAL) return -EPERM; - if (temp < 0 || temp > IMX_TEMP_PASSIVE) + /* do not allow passive to be set higher than critical */ + if (temp < 0 || temp > data->temp_critical) return -EINVAL; data->temp_passive = temp; @@ -404,17 +403,39 @@ static int imx_get_sensor_data(struct platform_device *pdev) data->c1 = temp64; data->c2 = n1 * data->c1 + 1000 * t1; - /* - * Set the default passive cooling trip point, - * can be changed from userspace. - */ - data->temp_passive = IMX_TEMP_PASSIVE; + /* use OTP for thermal grade */ + ret = regmap_read(map, OCOTP_MEM0, &val); + if (ret) { + dev_err(&pdev->dev, "failed to read temp grade: %d\n", ret); + return ret; + } + + /* The maximum die temp is specified by the Temperature Grade */ + switch ((val >> 6) & 0x3) { + case 0: /* Commercial (0 to 95C) */ + data->temp_grade = "Commercial"; + data->temp_max = 95000; + break; + case 1: /* Extended Commercial (-20 to 105C) */ + data->temp_grade = "Extended Commercial"; + data->temp_max = 105000; + break; + case 2: /* Industrial (-40 to 105C) */ + data->temp_grade = "Industrial"; + data->temp_max = 105000; + break; + case 3: /* Automotive (-40 to 125C) */ + data->temp_grade = "Automotive"; + data->temp_max = 125000; + break; + } /* - * The maximum die temperature set to 20 C higher than - * IMX_TEMP_PASSIVE. 
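The imx_thermal hunks replace the fixed 85C passive trip with limits derived from the part's temperature grade, read from two fuse bits in OCOTP_MEM0; the critical trip then sits 5C below the grade's maximum die temperature and the passive trip 10C below it, all in millicelsius. A standalone model of the decode, with a made-up fuse word:

#include <stdio.h>

int main(void)
{
    unsigned int ocotp_mem0 = 0x000000c0; /* illustrative fuse word */
    int temp_max = 95000;                 /* grade 0: Commercial */

    switch ((ocotp_mem0 >> 6) & 0x3) {
    case 1: temp_max = 105000; break;     /* Extended Commercial */
    case 2: temp_max = 105000; break;     /* Industrial */
    case 3: temp_max = 125000; break;     /* Automotive */
    }

    printf("max=%d critical=%d passive=%d\n",
           temp_max, temp_max - 5 * 1000, temp_max - 10 * 1000);
    return 0;
}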
+ * Set the critical trip point at 5C under max + * Set the passive trip point at 10C under max (can change via sysfs) */ - data->temp_critical = 1000 * 20 + data->temp_passive; + data->temp_critical = data->temp_max - (1000 * 5); + data->temp_passive = data->temp_max - (1000 * 10); return 0; } @@ -551,6 +572,11 @@ static int imx_thermal_probe(struct platform_device *pdev) return ret; } + dev_info(&pdev->dev, "%s CPU temperature grade - max:%dC" + " critical:%dC passive:%dC\n", data->temp_grade, + data->temp_max / 1000, data->temp_critical / 1000, + data->temp_passive / 1000); + /* Enable measurements at ~ 10 Hz */ regmap_write(map, TEMPSENSE1 + REG_CLR, TEMPSENSE1_MEASURE_FREQ); measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */ diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index 42b7d4253b94..be4eedcb839a 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c @@ -964,7 +964,7 @@ void of_thermal_destroy_zones(void) np = of_find_node_by_name(NULL, "thermal-zones"); if (!np) { - pr_err("unable to find thermal zones\n"); + pr_debug("unable to find thermal zones\n"); return; } diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c index f0fbea386869..1246aa6fcab0 100644 --- a/drivers/thermal/power_allocator.c +++ b/drivers/thermal/power_allocator.c @@ -174,7 +174,6 @@ static void estimate_pid_constants(struct thermal_zone_device *tz, /** * pid_controller() - PID controller * @tz: thermal zone we are operating in - * @current_temp: the current temperature in millicelsius * @control_temp: the target temperature in millicelsius * @max_allocatable_power: maximum allocatable power for this thermal zone * @@ -191,7 +190,6 @@ static void estimate_pid_constants(struct thermal_zone_device *tz, * Return: The power budget for the next period. 
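The power_allocator changes drop the thermal_zone_get_temp() call and the current_temp parameter: throttle() runs after the thermal core has refreshed tz->temperature for the current poll, so the governor can use the cached value and lose a failure path. The PID error term is then just the headroom to the target; a trivial model with an illustrative gain (the driver's real k_po/k_pu fixed-point arithmetic is not reproduced here):

#include <stdio.h>

int main(void)
{
    int control_temp = 75000; /* target, millicelsius */
    int temperature = 73000;  /* cached zone temperature */
    int err = control_temp - temperature; /* +2000: still below target */
    int k_p = 3;              /* illustrative proportional gain */

    printf("err=%d p_term=%d\n", err, err * k_p);
    return 0;
}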
*/ static u32 pid_controller(struct thermal_zone_device *tz, - int current_temp, int control_temp, u32 max_allocatable_power) { @@ -211,7 +209,7 @@ static u32 pid_controller(struct thermal_zone_device *tz, true); } - err = control_temp - current_temp; + err = control_temp - tz->temperature; err = int_to_frac(err); /* Calculate the proportional term */ @@ -332,7 +330,6 @@ static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors, } static int allocate_power(struct thermal_zone_device *tz, - int current_temp, int control_temp) { struct thermal_instance *instance; @@ -418,8 +415,7 @@ static int allocate_power(struct thermal_zone_device *tz, i++; } - power_range = pid_controller(tz, current_temp, control_temp, - max_allocatable_power); + power_range = pid_controller(tz, control_temp, max_allocatable_power); divvy_up_power(weighted_req_power, max_power, num_actors, total_weighted_req_power, power_range, granted_power, @@ -444,8 +440,8 @@ static int allocate_power(struct thermal_zone_device *tz, trace_thermal_power_allocator(tz, req_power, total_req_power, granted_power, total_granted_power, num_actors, power_range, - max_allocatable_power, current_temp, - control_temp - current_temp); + max_allocatable_power, tz->temperature, + control_temp - tz->temperature); kfree(req_power); unlock: @@ -612,7 +608,7 @@ static void power_allocator_unbind(struct thermal_zone_device *tz) static int power_allocator_throttle(struct thermal_zone_device *tz, int trip) { int ret; - int switch_on_temp, control_temp, current_temp; + int switch_on_temp, control_temp; struct power_allocator_params *params = tz->governor_data; /* @@ -622,15 +618,9 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip) if (trip != params->trip_max_desired_temperature) return 0; - ret = thermal_zone_get_temp(tz, ¤t_temp); - if (ret) { - dev_warn(&tz->device, "Failed to get temperature: %d\n", ret); - return ret; - } - ret = tz->ops->get_trip_temp(tz, params->trip_switch_on, &switch_on_temp); - if (!ret && (current_temp < switch_on_temp)) { + if (!ret && (tz->temperature < switch_on_temp)) { tz->passive = 0; reset_pid_controller(params); allow_maximum_power(tz); @@ -648,7 +638,7 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip) return ret; } - return allocate_power(tz, current_temp, control_temp); + return allocate_power(tz, control_temp); } static struct thermal_governor thermal_gov_power_allocator = { diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index 5d4ae7d705e0..13d01edc7a04 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c @@ -361,6 +361,24 @@ static irqreturn_t rcar_thermal_irq(int irq, void *data) /* * platform functions */ +static int rcar_thermal_remove(struct platform_device *pdev) +{ + struct rcar_thermal_common *common = platform_get_drvdata(pdev); + struct device *dev = &pdev->dev; + struct rcar_thermal_priv *priv; + + rcar_thermal_for_each_priv(priv, common) { + if (rcar_has_irq_support(priv)) + rcar_thermal_irq_disable(priv); + thermal_zone_device_unregister(priv->zone); + } + + pm_runtime_put(dev); + pm_runtime_disable(dev); + + return 0; +} + static int rcar_thermal_probe(struct platform_device *pdev) { struct rcar_thermal_common *common; @@ -377,6 +395,8 @@ static int rcar_thermal_probe(struct platform_device *pdev) if (!common) return -ENOMEM; + platform_set_drvdata(pdev, common); + INIT_LIST_HEAD(&common->head); spin_lock_init(&common->lock); common->dev = dev; @@ -454,43 +474,16 @@ 
static int rcar_thermal_probe(struct platform_device *pdev) rcar_thermal_common_write(common, ENR, enr_bits); } - platform_set_drvdata(pdev, common); - dev_info(dev, "%d sensor probed\n", i); return 0; error_unregister: - rcar_thermal_for_each_priv(priv, common) { - if (rcar_has_irq_support(priv)) - rcar_thermal_irq_disable(priv); - thermal_zone_device_unregister(priv->zone); - } - - pm_runtime_put(dev); - pm_runtime_disable(dev); + rcar_thermal_remove(pdev); return ret; } -static int rcar_thermal_remove(struct platform_device *pdev) -{ - struct rcar_thermal_common *common = platform_get_drvdata(pdev); - struct device *dev = &pdev->dev; - struct rcar_thermal_priv *priv; - - rcar_thermal_for_each_priv(priv, common) { - if (rcar_has_irq_support(priv)) - rcar_thermal_irq_disable(priv); - thermal_zone_device_unregister(priv->zone); - } - - pm_runtime_put(dev); - pm_runtime_disable(dev); - - return 0; -} - static const struct of_device_id rcar_thermal_dt_ids[] = { { .compatible = "renesas,rcar-thermal", }, {}, diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c index 9787e8aa509f..e845841ab036 100644 --- a/drivers/thermal/rockchip_thermal.c +++ b/drivers/thermal/rockchip_thermal.c @@ -1,6 +1,9 @@ /* * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd * + * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd + * Caesar Wang <wxt@rock-chips.com> + * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. @@ -45,17 +48,50 @@ enum tshut_polarity { }; /** - * The system has three Temperature Sensors. channel 0 is reserved, - * channel 1 is for CPU, and channel 2 is for GPU. + * The system has two Temperature Sensors. + * sensor0 is for CPU, and sensor1 is for GPU. */ enum sensor_id { - SENSOR_CPU = 1, + SENSOR_CPU = 0, SENSOR_GPU, }; +/** +* The conversion table has the adc value and temperature. +* ADC_DECREMENT is the adc value decremnet.(e.g. v2_code_table) +* ADC_INCREMNET is the adc value incremnet.(e.g. v3_code_table) +*/ +enum adc_sort_mode { + ADC_DECREMENT = 0, + ADC_INCREMENT, +}; + +/** + * The max sensors is two in rockchip SoCs. + * Two sensors: CPU and GPU sensor. 
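In rcar_thermal.c, rcar_thermal_remove() moves above the probe function so the probe error path can simply call it instead of duplicating the teardown; for that to be safe, platform_set_drvdata() now happens at the top of probe, before anything that can fail, since remove() reads the state back with platform_get_drvdata(). A sketch of the pattern; the bar_* names are placeholders:

static int bar_probe(struct platform_device *pdev)
{
    struct bar_priv *priv = devm_kzalloc(&pdev->dev, sizeof(*priv),
                                         GFP_KERNEL);
    if (!priv)
        return -ENOMEM;

    /* set drvdata early: the error path calls bar_remove(), which
     * retrieves it via platform_get_drvdata() */
    platform_set_drvdata(pdev, priv);

    if (bar_register_zones(priv)) /* hypothetical, may partially fail */
        goto error_unregister;
    return 0;

error_unregister:
    bar_remove(pdev); /* tears down whatever did get registered */
    return -ENXIO;
}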
+ */ +#define SOC_MAX_SENSORS 2 + +struct chip_tsadc_table { + const struct tsadc_table *id; + + /* the array table size*/ + unsigned int length; + + /* that analogic mask data */ + u32 data_mask; + + /* the sort mode is adc value that increment or decrement in table */ + enum adc_sort_mode mode; +}; + struct rockchip_tsadc_chip { + /* The sensor id of chip correspond to the ADC channel */ + int chn_id[SOC_MAX_SENSORS]; + int chn_num; + /* The hardware-controlled tshut property */ - long tshut_temp; + int tshut_temp; enum tshut_mode tshut_mode; enum tshut_polarity tshut_polarity; @@ -65,37 +101,40 @@ struct rockchip_tsadc_chip { void (*control)(void __iomem *reg, bool on); /* Per-sensor methods */ - int (*get_temp)(int chn, void __iomem *reg, int *temp); - void (*set_tshut_temp)(int chn, void __iomem *reg, long temp); + int (*get_temp)(struct chip_tsadc_table table, + int chn, void __iomem *reg, int *temp); + void (*set_tshut_temp)(struct chip_tsadc_table table, + int chn, void __iomem *reg, int temp); void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m); + + /* Per-table methods */ + struct chip_tsadc_table table; }; struct rockchip_thermal_sensor { struct rockchip_thermal_data *thermal; struct thermal_zone_device *tzd; - enum sensor_id id; + int id; }; -#define NUM_SENSORS 2 /* Ignore unused sensor 0 */ - struct rockchip_thermal_data { const struct rockchip_tsadc_chip *chip; struct platform_device *pdev; struct reset_control *reset; - struct rockchip_thermal_sensor sensors[NUM_SENSORS]; + struct rockchip_thermal_sensor sensors[SOC_MAX_SENSORS]; struct clk *clk; struct clk *pclk; void __iomem *regs; - long tshut_temp; + int tshut_temp; enum tshut_mode tshut_mode; enum tshut_polarity tshut_polarity; }; -/* TSADC V2 Sensor info define: */ +/* TSADC Sensor info define: */ #define TSADCV2_AUTO_CON 0x04 #define TSADCV2_INT_EN 0x08 #define TSADCV2_INT_PD 0x0c @@ -117,6 +156,8 @@ struct rockchip_thermal_data { #define TSADCV2_INT_PD_CLEAR_MASK ~BIT(8) #define TSADCV2_DATA_MASK 0xfff +#define TSADCV3_DATA_MASK 0x3ff + #define TSADCV2_HIGHT_INT_DEBOUNCE_COUNT 4 #define TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT 4 #define TSADCV2_AUTO_PERIOD_TIME 250 /* msec */ @@ -124,7 +165,7 @@ struct rockchip_thermal_data { struct tsadc_table { u32 code; - long temp; + int temp; }; static const struct tsadc_table v2_code_table[] = { @@ -165,21 +206,61 @@ static const struct tsadc_table v2_code_table[] = { {3421, 125000}, }; -static u32 rk_tsadcv2_temp_to_code(long temp) +static const struct tsadc_table v3_code_table[] = { + {0, -40000}, + {106, -40000}, + {108, -35000}, + {110, -30000}, + {112, -25000}, + {114, -20000}, + {116, -15000}, + {118, -10000}, + {120, -5000}, + {122, 0}, + {124, 5000}, + {126, 10000}, + {128, 15000}, + {130, 20000}, + {132, 25000}, + {134, 30000}, + {136, 35000}, + {138, 40000}, + {140, 45000}, + {142, 50000}, + {144, 55000}, + {146, 60000}, + {148, 65000}, + {150, 70000}, + {152, 75000}, + {154, 80000}, + {156, 85000}, + {158, 90000}, + {160, 95000}, + {162, 100000}, + {163, 105000}, + {165, 110000}, + {167, 115000}, + {169, 120000}, + {171, 125000}, + {TSADCV3_DATA_MASK, 125000}, +}; + +static u32 rk_tsadcv2_temp_to_code(struct chip_tsadc_table table, + int temp) { int high, low, mid; low = 0; - high = ARRAY_SIZE(v2_code_table) - 1; + high = table.length - 1; mid = (high + low) / 2; - if (temp < v2_code_table[low].temp || temp > v2_code_table[high].temp) + if (temp < table.id[low].temp || temp > table.id[high].temp) return 0; while (low <= high) { - if (temp == 
v2_code_table[mid].temp) - return v2_code_table[mid].code; - else if (temp < v2_code_table[mid].temp) + if (temp == table.id[mid].temp) + return table.id[mid].code; + else if (temp < table.id[mid].temp) high = mid - 1; else low = mid + 1; @@ -189,29 +270,54 @@ static u32 rk_tsadcv2_temp_to_code(long temp) return 0; } -static int rk_tsadcv2_code_to_temp(u32 code, int *temp) +static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code, + int *temp) { unsigned int low = 1; - unsigned int high = ARRAY_SIZE(v2_code_table) - 1; + unsigned int high = table.length - 1; unsigned int mid = (low + high) / 2; unsigned int num; unsigned long denom; - BUILD_BUG_ON(ARRAY_SIZE(v2_code_table) < 2); - - code &= TSADCV2_DATA_MASK; - if (code < v2_code_table[high].code) - return -EAGAIN; /* Incorrect reading */ - - while (low <= high) { - if (code >= v2_code_table[mid].code && - code < v2_code_table[mid - 1].code) - break; - else if (code < v2_code_table[mid].code) - low = mid + 1; - else - high = mid - 1; - mid = (low + high) / 2; + WARN_ON(table.length < 2); + + switch (table.mode) { + case ADC_DECREMENT: + code &= table.data_mask; + if (code < table.id[high].code) + return -EAGAIN; /* Incorrect reading */ + + while (low <= high) { + if (code >= table.id[mid].code && + code < table.id[mid - 1].code) + break; + else if (code < table.id[mid].code) + low = mid + 1; + else + high = mid - 1; + + mid = (low + high) / 2; + } + break; + case ADC_INCREMENT: + code &= table.data_mask; + if (code < table.id[low].code) + return -EAGAIN; /* Incorrect reading */ + + while (low <= high) { + if (code >= table.id[mid - 1].code && + code < table.id[mid].code) + break; + else if (code > table.id[mid].code) + low = mid + 1; + else + high = mid - 1; + + mid = (low + high) / 2; + } + break; + default: + pr_err("Invalid the conversion table\n"); } /* @@ -220,24 +326,28 @@ static int rk_tsadcv2_code_to_temp(u32 code, int *temp) * temperature between 2 table entries is linear and interpolate * to produce less granular result. */ - num = v2_code_table[mid].temp - v2_code_table[mid - 1].temp; - num *= v2_code_table[mid - 1].code - code; - denom = v2_code_table[mid - 1].code - v2_code_table[mid].code; - *temp = v2_code_table[mid - 1].temp + (num / denom); + num = table.id[mid].temp - v2_code_table[mid - 1].temp; + num *= abs(table.id[mid - 1].code - code); + denom = abs(table.id[mid - 1].code - table.id[mid].code); + *temp = table.id[mid - 1].temp + (num / denom); return 0; } /** - * rk_tsadcv2_initialize - initialize TASDC Controller - * (1) Set TSADCV2_AUTO_PERIOD, configure the interleave between - * every two accessing of TSADC in normal operation. - * (2) Set TSADCV2_AUTO_PERIOD_HT, configure the interleave between - * every two accessing of TSADC after the temperature is higher - * than COM_SHUT or COM_INT. - * (3) Set TSADCV2_HIGH_INT_DEBOUNCE and TSADC_HIGHT_TSHUT_DEBOUNCE, - * if the temperature is higher than COMP_INT or COMP_SHUT for - * "debounce" times, TSADC controller will generate interrupt or TSHUT. + * rk_tsadcv2_initialize - initialize TASDC Controller. + * + * (1) Set TSADC_V2_AUTO_PERIOD: + * Configure the interleave between every two accessing of + * TSADC in normal operation. + * + * (2) Set TSADCV2_AUTO_PERIOD_HT: + * Configure the interleave between every two accessing of + * TSADC after the temperature is higher than COM_SHUT or COM_INT. 
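The rockchip conversion routines are generalized to a per-chip chip_tsadc_table whose codes may run in either direction: rk3288's v2 table decrements with temperature, rk3368's v3 table increments, and a binary search brackets the reading before linear interpolation refines it. Note that one term of the new interpolation still reads v2_code_table[mid - 1].temp rather than table.id[mid - 1].temp, which looks like a leftover from the v2-only code and presumably wants a follow-up fix. A self-contained model of the increment-mode lookup over a toy table:

#include <stdio.h>

struct tsadc_entry { unsigned int code; int temp; }; /* temp in millicelsius */

/* tiny increasing-code table in the style of v3_code_table */
static const struct tsadc_entry tbl[] = {
    { 120, -5000 }, { 122, 0 }, { 124, 5000 }, { 126, 10000 },
};

static int code_to_temp(unsigned int code, int *temp)
{
    unsigned int low = 1, high = sizeof(tbl) / sizeof(tbl[0]) - 1;
    unsigned int mid = (low + high) / 2;

    if (code < tbl[0].code || code > tbl[high].code)
        return -1; /* out of range: bad reading */

    while (low <= high) {
        if (code >= tbl[mid - 1].code && code < tbl[mid].code)
            break; /* bracketed by entries mid-1 and mid */
        else if (code >= tbl[mid].code)
            low = mid + 1;
        else
            high = mid - 1;
        mid = (low + high) / 2;
    }

    /* interpolate between the bracketing entries */
    *temp = tbl[mid - 1].temp +
            (tbl[mid].temp - tbl[mid - 1].temp) *
            (int)(code - tbl[mid - 1].code) /
            (int)(tbl[mid].code - tbl[mid - 1].code);
    return 0;
}

int main(void)
{
    int t;
    if (!code_to_temp(123, &t))
        printf("temp=%d\n", t); /* 2500 millicelsius */
    return 0;
}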
+ * + * (3) Set TSADCV2_HIGHT_INT_DEBOUNCE_COUNT and TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT: + * If the temperature is higher than COMP_INT or COMP_SHUT for + * "debounce" times, the TSADC controller will generate an interrupt or a TSHUT event. */ static void rk_tsadcv2_initialize(void __iomem *regs, enum tshut_polarity tshut_polarity) @@ -279,20 +389,22 @@ static void rk_tsadcv2_control(void __iomem *regs, bool enable) writel_relaxed(val, regs + TSADCV2_AUTO_CON); } -static int rk_tsadcv2_get_temp(int chn, void __iomem *regs, int *temp) +static int rk_tsadcv2_get_temp(struct chip_tsadc_table table, + int chn, void __iomem *regs, int *temp) { u32 val; val = readl_relaxed(regs + TSADCV2_DATA(chn)); - return rk_tsadcv2_code_to_temp(val, temp); + return rk_tsadcv2_code_to_temp(table, val, temp); } -static void rk_tsadcv2_tshut_temp(int chn, void __iomem *regs, long temp) +static void rk_tsadcv2_tshut_temp(struct chip_tsadc_table table, + int chn, void __iomem *regs, int temp) { u32 tshut_value, val; - tshut_value = rk_tsadcv2_temp_to_code(temp); + tshut_value = rk_tsadcv2_temp_to_code(table, temp); writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn)); /* TSHUT will be valid */ @@ -318,6 +430,10 @@ static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs, } static const struct rockchip_tsadc_chip rk3288_tsadc_data = { + .chn_id[SENSOR_CPU] = 1, /* cpu sensor is channel 1 */ + .chn_id[SENSOR_GPU] = 2, /* gpu sensor is channel 2 */ + .chn_num = 2, /* two channels for tsadc */ + .tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */ .tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */ .tshut_temp = 95000, @@ -328,6 +444,37 @@ static const struct rockchip_tsadc_chip rk3288_tsadc_data = { .get_temp = rk_tsadcv2_get_temp, .set_tshut_temp = rk_tsadcv2_tshut_temp, .set_tshut_mode = rk_tsadcv2_tshut_mode, + + .table = { + .id = v2_code_table, + .length = ARRAY_SIZE(v2_code_table), + .data_mask = TSADCV2_DATA_MASK, + .mode = ADC_DECREMENT, + }, +}; + +static const struct rockchip_tsadc_chip rk3368_tsadc_data = { + .chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */ + .chn_id[SENSOR_GPU] = 1, /* gpu sensor is channel 1 */ + .chn_num = 2, /* two channels for tsadc */ + + .tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO to PMIC */ + .tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */ + .tshut_temp = 95000, + + .initialize = rk_tsadcv2_initialize, + .irq_ack = rk_tsadcv2_irq_ack, + .control = rk_tsadcv2_control, + .get_temp = rk_tsadcv2_get_temp, + .set_tshut_temp = rk_tsadcv2_tshut_temp, + .set_tshut_mode = rk_tsadcv2_tshut_mode, + + .table = { + .id = v3_code_table, + .length = ARRAY_SIZE(v3_code_table), + .data_mask = TSADCV3_DATA_MASK, + .mode = ADC_INCREMENT, + }, }; static const struct of_device_id of_rockchip_thermal_match[] = { @@ -335,6 +482,10 @@ static const struct of_device_id of_rockchip_thermal_match[] = { .compatible = "rockchip,rk3288-tsadc", .data = (void *)&rk3288_tsadc_data, }, + { + .compatible = "rockchip,rk3368-tsadc", + .data = (void *)&rk3368_tsadc_data, + }, { /* end */ }, }; MODULE_DEVICE_TABLE(of, of_rockchip_thermal_match); @@ -357,7 +508,7 @@ static irqreturn_t rockchip_thermal_alarm_irq_thread(int irq, void *dev) thermal->chip->irq_ack(thermal->regs); - for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) + for (i = 0; i < thermal->chip->chn_num; i++) thermal_zone_device_update(thermal->sensors[i].tzd); return IRQ_HANDLED; @@ -370,7 +521,8 @@ static int rockchip_thermal_get_temp(void *_sensor, int *out_temp) const struct rockchip_tsadc_chip 
*tsadc = sensor->thermal->chip; int retval; - retval = tsadc->get_temp(sensor->id, thermal->regs, out_temp); + retval = tsadc->get_temp(tsadc->table, + sensor->id, thermal->regs, out_temp); dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %d, retval: %d\n", sensor->id, *out_temp, retval); @@ -389,7 +541,7 @@ static int rockchip_configure_from_dt(struct device *dev, if (of_property_read_u32(np, "rockchip,hw-tshut-temp", &shut_temp)) { dev_warn(dev, - "Missing tshut temp property, using default %ld\n", + "Missing tshut temp property, using default %d\n", thermal->chip->tshut_temp); thermal->tshut_temp = thermal->chip->tshut_temp; } else { @@ -397,7 +549,7 @@ static int rockchip_configure_from_dt(struct device *dev, } if (thermal->tshut_temp > INT_MAX) { - dev_err(dev, "Invalid tshut temperature specified: %ld\n", + dev_err(dev, "Invalid tshut temperature specified: %d\n", thermal->tshut_temp); return -ERANGE; } @@ -442,13 +594,14 @@ static int rockchip_thermal_register_sensor(struct platform_device *pdev, struct rockchip_thermal_data *thermal, struct rockchip_thermal_sensor *sensor, - enum sensor_id id) + int id) { const struct rockchip_tsadc_chip *tsadc = thermal->chip; int error; tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode); - tsadc->set_tshut_temp(id, thermal->regs, thermal->tshut_temp); + tsadc->set_tshut_temp(tsadc->table, id, thermal->regs, + thermal->tshut_temp); sensor->thermal = thermal; sensor->id = id; @@ -481,7 +634,7 @@ static int rockchip_thermal_probe(struct platform_device *pdev) const struct of_device_id *match; struct resource *res; int irq; - int i; + int i, j; int error; match = of_match_node(of_rockchip_thermal_match, np); @@ -556,22 +709,19 @@ static int rockchip_thermal_probe(struct platform_device *pdev) thermal->chip->initialize(thermal->regs, thermal->tshut_polarity); - error = rockchip_thermal_register_sensor(pdev, thermal, - &thermal->sensors[0], - SENSOR_CPU); - if (error) { - dev_err(&pdev->dev, - "failed to register CPU thermal sensor: %d\n", error); - goto err_disable_pclk; - } - - error = rockchip_thermal_register_sensor(pdev, thermal, - &thermal->sensors[1], - SENSOR_GPU); - if (error) { - dev_err(&pdev->dev, - "failed to register GPU thermal sensor: %d\n", error); - goto err_unregister_cpu_sensor; + for (i = 0; i < thermal->chip->chn_num; i++) { + error = rockchip_thermal_register_sensor(pdev, thermal, + &thermal->sensors[i], + thermal->chip->chn_id[i]); + if (error) { + dev_err(&pdev->dev, + "failed to register sensor[%d] : error = %d\n", + i, error); + for (j = 0; j < i; j++) + thermal_zone_of_sensor_unregister(&pdev->dev, + thermal->sensors[j].tzd); + goto err_disable_pclk; + } } error = devm_request_threaded_irq(&pdev->dev, irq, NULL, @@ -581,22 +731,23 @@ static int rockchip_thermal_probe(struct platform_device *pdev) if (error) { dev_err(&pdev->dev, "failed to request tsadc irq: %d\n", error); - goto err_unregister_gpu_sensor; + goto err_unregister_sensor; } thermal->chip->control(thermal->regs, true); - for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) + for (i = 0; i < thermal->chip->chn_num; i++) rockchip_thermal_toggle_sensor(&thermal->sensors[i], true); platform_set_drvdata(pdev, thermal); return 0; -err_unregister_gpu_sensor: - thermal_zone_of_sensor_unregister(&pdev->dev, thermal->sensors[1].tzd); -err_unregister_cpu_sensor: - thermal_zone_of_sensor_unregister(&pdev->dev, thermal->sensors[0].tzd); +err_unregister_sensor: + while (i--) + thermal_zone_of_sensor_unregister(&pdev->dev, + thermal->sensors[i].tzd); + 
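/*
 * A minimal, self-contained sketch of the lookup-plus-interpolation
 * scheme used by rk_tsadcv2_code_to_temp() above. The struct and
 * function names here are hypothetical and only the arithmetic mirrors
 * the driver; temperatures are in millidegrees Celsius, assuming an
 * ADC_INCREMENT table such as v3_code_table.
 */
struct toy_entry { unsigned int code; int temp; };

static int toy_interpolate(const struct toy_entry *lo,
			   const struct toy_entry *hi, unsigned int code)
{
	/* temp(code) is treated as linear between two adjacent entries */
	int num = (hi->temp - lo->temp) * (int)(code - lo->code);
	int denom = (int)(hi->code - lo->code);

	return lo->temp + num / denom;
}

/*
 * Worked example with the v3_code_table entries {124, 5000} and
 * {126, 10000}: code 125 gives 5000 + 5000 * 1 / 2 = 7500, i.e. 7.5 C.
 */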
err_disable_pclk: clk_disable_unprepare(thermal->pclk); err_disable_clk: @@ -610,7 +761,7 @@ static int rockchip_thermal_remove(struct platform_device *pdev) struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev); int i; - for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) { + for (i = 0; i < thermal->chip->chn_num; i++) { struct rockchip_thermal_sensor *sensor = &thermal->sensors[i]; rockchip_thermal_toggle_sensor(sensor, false); @@ -631,7 +782,7 @@ static int __maybe_unused rockchip_thermal_suspend(struct device *dev) struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev); int i; - for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) + for (i = 0; i < thermal->chip->chn_num; i++) rockchip_thermal_toggle_sensor(&thermal->sensors[i], false); thermal->chip->control(thermal->regs, false); @@ -663,18 +814,19 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev) thermal->chip->initialize(thermal->regs, thermal->tshut_polarity); - for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) { - enum sensor_id id = thermal->sensors[i].id; + for (i = 0; i < thermal->chip->chn_num; i++) { + int id = thermal->sensors[i].id; thermal->chip->set_tshut_mode(id, thermal->regs, thermal->tshut_mode); - thermal->chip->set_tshut_temp(id, thermal->regs, + thermal->chip->set_tshut_temp(thermal->chip->table, + id, thermal->regs, thermal->tshut_temp); } thermal->chip->control(thermal->regs, true); - for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) + for (i = 0; i < thermal->chip->chn_num; i++) rockchip_thermal_toggle_sensor(&thermal->sensors[i], true); pinctrl_pm_select_default_state(dev); diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 7a8a6c6952e9..1c427beffadd 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -446,7 +446,7 @@ config MAX63XX_WATCHDOG config IMX2_WDT tristate "IMX2+ Watchdog" - depends on ARCH_MXC + depends on ARCH_MXC || ARCH_LAYERSCAPE select REGMAP_MMIO select WATCHDOG_CORE help diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c index 6ad9df948711..b751f43d76ed 100644 --- a/drivers/watchdog/mtk_wdt.c +++ b/drivers/watchdog/mtk_wdt.c @@ -123,6 +123,7 @@ static int mtk_wdt_stop(struct watchdog_device *wdt_dev) reg = readl(wdt_base + WDT_MODE); reg &= ~WDT_MODE_EN; + reg |= WDT_MODE_KEY; iowrite32(reg, wdt_base + WDT_MODE); return 0; diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c index d96bee017fd3..6f17c935a6cf 100644 --- a/drivers/watchdog/omap_wdt.c +++ b/drivers/watchdog/omap_wdt.c @@ -205,7 +205,7 @@ static int omap_wdt_set_timeout(struct watchdog_device *wdog, static unsigned int omap_wdt_get_timeleft(struct watchdog_device *wdog) { - struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog); + struct omap_wdt_dev *wdev = to_omap_wdt_dev(wdog); void __iomem *base = wdev->base; u32 value; diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c index 4224b3ec83a5..313cd1c6fda0 100644 --- a/drivers/watchdog/pnx4008_wdt.c +++ b/drivers/watchdog/pnx4008_wdt.c @@ -80,7 +80,7 @@ static unsigned int heartbeat = DEFAULT_HEARTBEAT; static DEFINE_SPINLOCK(io_lock); static void __iomem *wdt_base; -struct clk *wdt_clk; +static struct clk *wdt_clk; static int pnx4008_wdt_start(struct watchdog_device *wdd) { @@ -161,7 +161,7 @@ static int pnx4008_wdt_probe(struct platform_device *pdev) if (IS_ERR(wdt_clk)) return PTR_ERR(wdt_clk); - ret = clk_enable(wdt_clk); + ret = clk_prepare_enable(wdt_clk); if (ret) return ret; @@ -184,7 +184,7 @@ static int 
pnx4008_wdt_probe(struct platform_device *pdev) return 0; disable_clk: - clk_disable(wdt_clk); + clk_disable_unprepare(wdt_clk); return ret; } @@ -192,7 +192,7 @@ static int pnx4008_wdt_remove(struct platform_device *pdev) { watchdog_unregister_device(&pnx4008_wdd); - clk_disable(wdt_clk); + clk_disable_unprepare(wdt_clk); return 0; } diff --git a/drivers/watchdog/tegra_wdt.c b/drivers/watchdog/tegra_wdt.c index 7f97cdd53f29..9ec57608da82 100644 --- a/drivers/watchdog/tegra_wdt.c +++ b/drivers/watchdog/tegra_wdt.c @@ -140,8 +140,10 @@ static int tegra_wdt_set_timeout(struct watchdog_device *wdd, { wdd->timeout = timeout; - if (watchdog_active(wdd)) + if (watchdog_active(wdd)) { + tegra_wdt_stop(wdd); return tegra_wdt_start(wdd); + } return 0; } diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c index 91bf55a20024..20e2bba10400 100644 --- a/drivers/watchdog/w83977f_wdt.c +++ b/drivers/watchdog/w83977f_wdt.c @@ -224,7 +224,7 @@ static int wdt_keepalive(void) static int wdt_set_timeout(int t) { - int tmrval; + unsigned int tmrval; /* * Convert seconds to watchdog counter time units, rounding up. diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 849500e4e14d..524c22146429 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -39,6 +39,7 @@ #include <asm/irq.h> #include <asm/idle.h> #include <asm/io_apic.h> +#include <asm/i8259.h> #include <asm/xen/pci.h> #endif #include <asm/sync_bitops.h> @@ -420,7 +421,7 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi) return xen_allocate_irq_dynamic(); /* Legacy IRQ descriptors are already allocated by the arch. */ - if (gsi < NR_IRQS_LEGACY) + if (gsi < nr_legacy_irqs()) irq = gsi; else irq = irq_alloc_desc_at(gsi, -1); @@ -446,7 +447,7 @@ static void xen_free_irq(unsigned irq) kfree(info); /* Legacy IRQ descriptors are managed by the arch. */ - if (irq < NR_IRQS_LEGACY) + if (irq < nr_legacy_irqs()) return; irq_free_desc(irq); diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c index 00f40f051d95..38272ad24551 100644 --- a/drivers/xen/evtchn.c +++ b/drivers/xen/evtchn.c @@ -49,6 +49,8 @@ #include <linux/init.h> #include <linux/mutex.h> #include <linux/cpu.h> +#include <linux/mm.h> +#include <linux/vmalloc.h> #include <xen/xen.h> #include <xen/events.h> @@ -58,10 +60,10 @@ struct per_user_data { struct mutex bind_mutex; /* serialize bind/unbind operations */ struct rb_root evtchns; + unsigned int nr_evtchns; /* Notification ring, accessed via /dev/xen/evtchn. 
*/ -#define EVTCHN_RING_SIZE (PAGE_SIZE / sizeof(evtchn_port_t)) -#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1)) + unsigned int ring_size; evtchn_port_t *ring; unsigned int ring_cons, ring_prod, ring_overflow; struct mutex ring_cons_mutex; /* protect against concurrent readers */ @@ -80,10 +82,41 @@ struct user_evtchn { bool enabled; }; +static evtchn_port_t *evtchn_alloc_ring(unsigned int size) +{ + evtchn_port_t *ring; + size_t s = size * sizeof(*ring); + + ring = kmalloc(s, GFP_KERNEL); + if (!ring) + ring = vmalloc(s); + + return ring; +} + +static void evtchn_free_ring(evtchn_port_t *ring) +{ + kvfree(ring); +} + +static unsigned int evtchn_ring_offset(struct per_user_data *u, + unsigned int idx) +{ + return idx & (u->ring_size - 1); +} + +static evtchn_port_t *evtchn_ring_entry(struct per_user_data *u, + unsigned int idx) +{ + return u->ring + evtchn_ring_offset(u, idx); +} + static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn) { struct rb_node **new = &(u->evtchns.rb_node), *parent = NULL; + u->nr_evtchns++; + while (*new) { struct user_evtchn *this; @@ -107,6 +140,7 @@ static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn) static void del_evtchn(struct per_user_data *u, struct user_evtchn *evtchn) { + u->nr_evtchns--; rb_erase(&evtchn->node, &u->evtchns); kfree(evtchn); } @@ -144,8 +178,8 @@ static irqreturn_t evtchn_interrupt(int irq, void *data) spin_lock(&u->ring_prod_lock); - if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) { - u->ring[EVTCHN_RING_MASK(u->ring_prod)] = evtchn->port; + if ((u->ring_prod - u->ring_cons) < u->ring_size) { + *evtchn_ring_entry(u, u->ring_prod) = evtchn->port; wmb(); /* Ensure ring contents visible */ if (u->ring_cons == u->ring_prod++) { wake_up_interruptible(&u->evtchn_wait); @@ -200,10 +234,10 @@ static ssize_t evtchn_read(struct file *file, char __user *buf, } /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */ - if (((c ^ p) & EVTCHN_RING_SIZE) != 0) { - bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) * + if (((c ^ p) & u->ring_size) != 0) { + bytes1 = (u->ring_size - evtchn_ring_offset(u, c)) * sizeof(evtchn_port_t); - bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t); + bytes2 = evtchn_ring_offset(u, p) * sizeof(evtchn_port_t); } else { bytes1 = (p - c) * sizeof(evtchn_port_t); bytes2 = 0; @@ -219,7 +253,7 @@ static ssize_t evtchn_read(struct file *file, char __user *buf, rc = -EFAULT; rmb(); /* Ensure that we see the port before we copy it. */ - if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) || + if (copy_to_user(buf, evtchn_ring_entry(u, c), bytes1) || ((bytes2 != 0) && copy_to_user(&buf[bytes1], &u->ring[0], bytes2))) goto unlock_out; @@ -278,6 +312,66 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf, return rc; } +static int evtchn_resize_ring(struct per_user_data *u) +{ + unsigned int new_size; + evtchn_port_t *new_ring, *old_ring; + unsigned int p, c; + + /* + * Ensure the ring is large enough to capture all possible + * events. i.e., one free slot for each bound event. + */ + if (u->nr_evtchns <= u->ring_size) + return 0; + + if (u->ring_size == 0) + new_size = 64; + else + new_size = 2 * u->ring_size; + + new_ring = evtchn_alloc_ring(new_size); + if (!new_ring) + return -ENOMEM; + + old_ring = u->ring; + + /* + * Access to the ring contents is serialized by either the + * prod /or/ cons lock so take both when resizing. 
+ */ + mutex_lock(&u->ring_cons_mutex); + spin_lock_irq(&u->ring_prod_lock); + + /* + * Copy the old ring contents to the new ring. + * + * If the ring contents crosses the end of the current ring, + * it needs to be copied in two chunks. + * + * +---------+ +------------------+ + * |34567 12| -> | 1234567 | + * +-----p-c-+ +------------------+ + */ + p = evtchn_ring_offset(u, u->ring_prod); + c = evtchn_ring_offset(u, u->ring_cons); + if (p < c) { + memcpy(new_ring + c, u->ring + c, (u->ring_size - c) * sizeof(*u->ring)); + memcpy(new_ring + u->ring_size, u->ring, p * sizeof(*u->ring)); + } else + memcpy(new_ring + c, u->ring + c, (p - c) * sizeof(*u->ring)); + + u->ring = new_ring; + u->ring_size = new_size; + + spin_unlock_irq(&u->ring_prod_lock); + mutex_unlock(&u->ring_cons_mutex); + + evtchn_free_ring(old_ring); + + return 0; +} + static int evtchn_bind_to_user(struct per_user_data *u, int port) { struct user_evtchn *evtchn; @@ -305,6 +399,10 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port) if (rc < 0) goto err; + rc = evtchn_resize_ring(u); + if (rc < 0) + goto err; + rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, 0, u->name, evtchn); if (rc < 0) @@ -503,13 +601,6 @@ static int evtchn_open(struct inode *inode, struct file *filp) init_waitqueue_head(&u->evtchn_wait); - u->ring = (evtchn_port_t *)__get_free_page(GFP_KERNEL); - if (u->ring == NULL) { - kfree(u->name); - kfree(u); - return -ENOMEM; - } - mutex_init(&u->bind_mutex); mutex_init(&u->ring_cons_mutex); spin_lock_init(&u->ring_prod_lock); @@ -532,7 +623,7 @@ static int evtchn_release(struct inode *inode, struct file *filp) evtchn_unbind_from_user(u, evtchn); } - free_page((unsigned long)u->ring); + evtchn_free_ring(u->ring); kfree(u->name); kfree(u); diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 2ea0b3b2a91d..1be5dd048622 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -804,7 +804,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) vma->vm_ops = &gntdev_vmops; - vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO; if (use_ptemod) vma->vm_flags |= VM_DONTCOPY; diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 6dcdb2ec9211..d453d62ab0c6 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -355,7 +355,7 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info, index = srcu_read_lock(&fs_info->subvol_srcu); - root = btrfs_read_fs_root_no_name(fs_info, &root_key); + root = btrfs_get_fs_root(fs_info, &root_key, false); if (IS_ERR(root)) { srcu_read_unlock(&fs_info->subvol_srcu, index); ret = PTR_ERR(root); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8c58191249cc..35489e7129a7 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -3416,6 +3416,7 @@ int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, struct btrfs_block_group_cache *btrfs_lookup_block_group( struct btrfs_fs_info *info, u64 bytenr); +void btrfs_get_block_group(struct btrfs_block_group_cache *cache); void btrfs_put_block_group(struct btrfs_block_group_cache *cache); int get_block_group_index(struct btrfs_block_group_cache *cache); struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, @@ -3479,6 +3480,9 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytes_used, u64 type, u64 chunk_objectid, u64 chunk_offset, u64 size); +struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( + struct btrfs_fs_info *fs_info, + const 
u64 chunk_offset); int btrfs_remove_block_group(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 group_start, struct extent_map *em); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index acf3ed11cfb6..4b89680a1923 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -124,7 +124,7 @@ static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) return (cache->flags & bits) == bits; } -static void btrfs_get_block_group(struct btrfs_block_group_cache *cache) +void btrfs_get_block_group(struct btrfs_block_group_cache *cache) { atomic_inc(&cache->count); } @@ -5915,19 +5915,6 @@ static int update_block_group(struct btrfs_trans_handle *trans, set_extent_dirty(info->pinned_extents, bytenr, bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL); - /* - * No longer have used bytes in this block group, queue - * it for deletion. - */ - if (old_val == 0) { - spin_lock(&info->unused_bgs_lock); - if (list_empty(&cache->bg_list)) { - btrfs_get_block_group(cache); - list_add_tail(&cache->bg_list, - &info->unused_bgs); - } - spin_unlock(&info->unused_bgs_lock); - } } spin_lock(&trans->transaction->dirty_bgs_lock); @@ -5939,6 +5926,22 @@ static int update_block_group(struct btrfs_trans_handle *trans, } spin_unlock(&trans->transaction->dirty_bgs_lock); + /* + * No longer have used bytes in this block group, queue it for + * deletion. We do this after adding the block group to the + * dirty list to avoid races between cleaner kthread and space + * cache writeout. + */ + if (!alloc && old_val == 0) { + spin_lock(&info->unused_bgs_lock); + if (list_empty(&cache->bg_list)) { + btrfs_get_block_group(cache); + list_add_tail(&cache->bg_list, + &info->unused_bgs); + } + spin_unlock(&info->unused_bgs_lock); + } + btrfs_put_block_group(cache); total -= num_bytes; bytenr += num_bytes; @@ -8105,21 +8108,47 @@ reada: } /* - * TODO: Modify related function to add related node/leaf to dirty_extent_root, - * for later qgroup accounting. - * - * Current, this function does nothing. + * These may not be seen by the usual inc/dec ref code so we have to + * add them here. 
*/ +static int record_one_subtree_extent(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u64 bytenr, + u64 num_bytes) +{ + struct btrfs_qgroup_extent_record *qrecord; + struct btrfs_delayed_ref_root *delayed_refs; + + qrecord = kmalloc(sizeof(*qrecord), GFP_NOFS); + if (!qrecord) + return -ENOMEM; + + qrecord->bytenr = bytenr; + qrecord->num_bytes = num_bytes; + qrecord->old_roots = NULL; + + delayed_refs = &trans->transaction->delayed_refs; + spin_lock(&delayed_refs->lock); + if (btrfs_qgroup_insert_dirty_extent(delayed_refs, qrecord)) + kfree(qrecord); + spin_unlock(&delayed_refs->lock); + + return 0; +} + static int account_leaf_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *eb) { int nr = btrfs_header_nritems(eb); - int i, extent_type; + int i, extent_type, ret; struct btrfs_key key; struct btrfs_file_extent_item *fi; u64 bytenr, num_bytes; + /* We can be called directly from walk_up_proc() */ + if (!root->fs_info->quota_enabled) + return 0; + for (i = 0; i < nr; i++) { btrfs_item_key_to_cpu(eb, &key, i); @@ -8138,6 +8167,10 @@ static int account_leaf_items(struct btrfs_trans_handle *trans, continue; num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi); + + ret = record_one_subtree_extent(trans, root, bytenr, num_bytes); + if (ret) + return ret; } return 0; } @@ -8206,8 +8239,6 @@ static int adjust_slots_upwards(struct btrfs_root *root, /* * root_eb is the subtree root and is locked before this function is called. - * TODO: Modify this function to mark all (including complete shared node) - * to dirty_extent_root to allow it get accounted in qgroup. */ static int account_shared_subtree(struct btrfs_trans_handle *trans, struct btrfs_root *root, @@ -8285,6 +8316,11 @@ walk_down: btrfs_tree_read_lock(eb); btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); path->locks[level] = BTRFS_READ_LOCK_BLOCKING; + + ret = record_one_subtree_extent(trans, root, child_bytenr, + root->nodesize); + if (ret) + goto out; } if (level == 0) { @@ -10256,6 +10292,47 @@ out: return ret; } +struct btrfs_trans_handle * +btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info, + const u64 chunk_offset) +{ + struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree; + struct extent_map *em; + struct map_lookup *map; + unsigned int num_items; + + read_lock(&em_tree->lock); + em = lookup_extent_mapping(em_tree, chunk_offset, 1); + read_unlock(&em_tree->lock); + ASSERT(em && em->start == chunk_offset); + + /* + * We need to reserve 3 + N units from the metadata space info in order + * to remove a block group (done at btrfs_remove_chunk() and at + * btrfs_remove_block_group()), which are used for: + * + * 1 unit for adding the free space inode's orphan (located in the tree + * of tree roots). + * 1 unit for deleting the block group item (located in the extent + * tree). + * 1 unit for deleting the free space item (located in tree of tree + * roots). + * N units for deleting N device extent items corresponding to each + * stripe (located in the device tree). + * + * In order to remove a block group we also need to reserve units in the + * system space info in order to update the chunk tree (update one or + * more device items and remove one chunk item), but this is done at + * btrfs_remove_chunk() through a call to check_system_chunk(). 
+ */ + map = (struct map_lookup *)em->bdev; + num_items = 3 + map->num_stripes; + free_extent_map(em); + + return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root, + num_items, 1); +} + /* * Process the unused_bgs list and remove any that don't have any allocated * space inside of them. @@ -10322,8 +10399,8 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) * Want to do this before we do anything else so we can recover * properly if we fail to join the transaction. */ - /* 1 for btrfs_orphan_reserve_metadata() */ - trans = btrfs_start_transaction(root, 1); + trans = btrfs_start_trans_remove_block_group(fs_info, + block_group->key.objectid); if (IS_ERR(trans)) { btrfs_dec_block_group_ro(root, block_group); ret = PTR_ERR(trans); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 977e715f0bf2..72e73461c064 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1882,8 +1882,13 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) struct btrfs_log_ctx ctx; int ret = 0; bool full_sync = 0; - const u64 len = end - start + 1; + u64 len; + /* + * The range length can be represented by u64, we have to do the typecasts + * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync() + */ + len = (u64)end - (u64)start + 1; trace_btrfs_sync_file(file, datasync); /* @@ -2071,8 +2076,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) } } if (!full_sync) { - ret = btrfs_wait_ordered_range(inode, start, - end - start + 1); + ret = btrfs_wait_ordered_range(inode, start, len); if (ret) { btrfs_end_transaction(trans, root); goto out; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 994490d5fa64..a70c5790f8f5 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4046,9 +4046,7 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans, */ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir) { - struct btrfs_trans_handle *trans; struct btrfs_root *root = BTRFS_I(dir)->root; - int ret; /* * 1 for the possible orphan item @@ -4057,27 +4055,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir) * 1 for the inode ref * 1 for the inode */ - trans = btrfs_start_transaction(root, 5); - if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) - return trans; - - if (PTR_ERR(trans) == -ENOSPC) { - u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5); - - trans = btrfs_start_transaction(root, 0); - if (IS_ERR(trans)) - return trans; - ret = btrfs_cond_migrate_bytes(root->fs_info, - &root->fs_info->trans_block_rsv, - num_bytes, 5); - if (ret) { - btrfs_end_transaction(trans, root); - return ERR_PTR(ret); - } - trans->block_rsv = &root->fs_info->trans_block_rsv; - trans->bytes_reserved = num_bytes; - } - return trans; + return btrfs_start_transaction_fallback_global_rsv(root, 5, 5); } static int btrfs_unlink(struct inode *dir, struct dentry *dentry) diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 93e12c18ffd7..5279fdae7142 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -993,9 +993,10 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans, mutex_lock(&fs_info->qgroup_ioctl_lock); if (!fs_info->quota_root) goto out; - spin_lock(&fs_info->qgroup_lock); fs_info->quota_enabled = 0; fs_info->pending_quota_state = 0; + btrfs_qgroup_wait_for_completion(fs_info); + spin_lock(&fs_info->qgroup_lock); quota_root = fs_info->quota_root; fs_info->quota_root = NULL; fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON; @@ -1461,6 +1462,8 @@ struct 
btrfs_qgroup_extent_record struct btrfs_qgroup_extent_record *entry; u64 bytenr = record->bytenr; + assert_spin_locked(&delayed_refs->lock); + while (*p) { parent_node = *p; entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record, diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 2907a77fb1f6..b091d94ceef6 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -3432,7 +3432,9 @@ out: static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, struct btrfs_device *scrub_dev, u64 chunk_offset, u64 length, - u64 dev_offset, int is_dev_replace) + u64 dev_offset, + struct btrfs_block_group_cache *cache, + int is_dev_replace) { struct btrfs_mapping_tree *map_tree = &sctx->dev_root->fs_info->mapping_tree; @@ -3445,8 +3447,18 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1); read_unlock(&map_tree->map_tree.lock); - if (!em) - return -EINVAL; + if (!em) { + /* + * Might have been an unused block group deleted by the cleaner + * kthread or relocation. + */ + spin_lock(&cache->lock); + if (!cache->removed) + ret = -EINVAL; + spin_unlock(&cache->lock); + + return ret; + } map = (struct map_lookup *)em->bdev; if (em->start != chunk_offset) @@ -3483,6 +3495,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, u64 length; u64 chunk_offset; int ret = 0; + int ro_set; int slot; struct extent_buffer *l; struct btrfs_key key; @@ -3568,7 +3581,21 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, scrub_pause_on(fs_info); ret = btrfs_inc_block_group_ro(root, cache); scrub_pause_off(fs_info); - if (ret) { + + if (ret == 0) { + ro_set = 1; + } else if (ret == -ENOSPC) { + /* + * btrfs_inc_block_group_ro return -ENOSPC when it + * failed in creating new chunk for metadata. + * It is not a problem for scrub/replace, because + * metadata are always cowed, and our scrub paused + * commit_transactions. + */ + ro_set = 0; + } else { + btrfs_warn(fs_info, "failed setting block group ro, ret=%d\n", + ret); btrfs_put_block_group(cache); break; } @@ -3577,7 +3604,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, dev_replace->cursor_left = found_key.offset; dev_replace->item_needs_writeback = 1; ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length, - found_key.offset, is_dev_replace); + found_key.offset, cache, is_dev_replace); /* * flush, submit all pending read and write bios, afterwards @@ -3611,7 +3638,30 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, scrub_pause_off(fs_info); - btrfs_dec_block_group_ro(root, cache); + if (ro_set) + btrfs_dec_block_group_ro(root, cache); + + /* + * We might have prevented the cleaner kthread from deleting + * this block group if it was already unused because we raced + * and set it to RO mode first. So add it back to the unused + * list, otherwise it might not ever be deleted unless a manual + * balance is triggered or it becomes used and unused again. 
+ */ + spin_lock(&cache->lock); + if (!cache->removed && !cache->ro && cache->reserved == 0 && + btrfs_block_group_used(&cache->item) == 0) { + spin_unlock(&cache->lock); + spin_lock(&fs_info->unused_bgs_lock); + if (list_empty(&cache->bg_list)) { + btrfs_get_block_group(cache); + list_add_tail(&cache->bg_list, + &fs_info->unused_bgs); + } + spin_unlock(&fs_info->unused_bgs_lock); + } else { + spin_unlock(&cache->lock); + } btrfs_put_block_group(cache); if (ret) diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c index c8c3d70c31ff..8b72b005bfb9 100644 --- a/fs/btrfs/tests/free-space-tests.c +++ b/fs/btrfs/tests/free-space-tests.c @@ -898,8 +898,10 @@ int btrfs_test_free_space_cache(void) } root = btrfs_alloc_dummy_root(); - if (!root) + if (IS_ERR(root)) { + ret = PTR_ERR(root); goto out; + } root->fs_info = btrfs_alloc_dummy_fs_info(); if (!root->fs_info) diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 418c6a2ad7d8..3367a3c6f214 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -592,6 +592,38 @@ struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, return start_transaction(root, num_items, TRANS_START, BTRFS_RESERVE_FLUSH_ALL); } +struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv( + struct btrfs_root *root, + unsigned int num_items, + int min_factor) +{ + struct btrfs_trans_handle *trans; + u64 num_bytes; + int ret; + + trans = btrfs_start_transaction(root, num_items); + if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) + return trans; + + trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) + return trans; + + num_bytes = btrfs_calc_trans_metadata_size(root, num_items); + ret = btrfs_cond_migrate_bytes(root->fs_info, + &root->fs_info->trans_block_rsv, + num_bytes, + min_factor); + if (ret) { + btrfs_end_transaction(trans, root); + return ERR_PTR(ret); + } + + trans->block_rsv = &root->fs_info->trans_block_rsv; + trans->bytes_reserved = num_bytes; + + return trans; +} struct btrfs_trans_handle *btrfs_start_transaction_lflush( struct btrfs_root *root, diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index b05b2f64d913..0da21ca9b3fb 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -185,6 +185,10 @@ int btrfs_end_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root); struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, unsigned int num_items); +struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv( + struct btrfs_root *root, + unsigned int num_items, + int min_factor); struct btrfs_trans_handle *btrfs_start_transaction_lflush( struct btrfs_root *root, unsigned int num_items); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index a6df8fdc1312..456452206609 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1973,8 +1973,7 @@ void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info, if (srcdev->writeable) { fs_devices->rw_devices--; /* zero out the old super if it is writable */ - btrfs_scratch_superblocks(srcdev->bdev, - rcu_str_deref(srcdev->name)); + btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str); } if (srcdev->bdev) @@ -2024,8 +2023,7 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info, btrfs_sysfs_rm_device_link(fs_info->fs_devices, tgtdev); if (tgtdev->bdev) { - btrfs_scratch_superblocks(tgtdev->bdev, - rcu_str_deref(tgtdev->name)); + btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str); 
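/*
 * A hedged sketch of the reservation sizing performed by
 * btrfs_start_trans_remove_block_group() earlier in this diff: removing
 * a block group touches three fixed metadata items (the free space
 * inode's orphan, the block group item and the free space item) plus
 * one device extent item per stripe. The helper name below is
 * hypothetical, not a btrfs function.
 */
static unsigned int toy_remove_bg_units(unsigned int num_stripes)
{
	return 3 + num_stripes;
}

/* Example: a chunk striped across 4 devices reserves 3 + 4 = 7 units. */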
fs_info->fs_devices->open_devices--; } fs_info->fs_devices->num_devices--; @@ -2853,7 +2851,8 @@ static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset) if (ret) return ret; - trans = btrfs_start_transaction(root, 0); + trans = btrfs_start_trans_remove_block_group(root->fs_info, + chunk_offset); if (IS_ERR(trans)) { ret = PTR_ERR(trans); btrfs_std_error(root->fs_info, ret, NULL); @@ -3123,7 +3122,7 @@ static int chunk_profiles_filter(u64 chunk_type, return 1; } -static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, +static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, struct btrfs_balance_args *bargs) { struct btrfs_block_group_cache *cache; @@ -3156,7 +3155,7 @@ static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, return ret; } -static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, +static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, struct btrfs_balance_args *bargs) { struct btrfs_block_group_cache *cache; diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index ec5712372732..d5c84f6b1353 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -382,7 +382,7 @@ struct map_lookup { #define BTRFS_BALANCE_ARGS_LIMIT (1ULL << 5) #define BTRFS_BALANCE_ARGS_LIMIT_RANGE (1ULL << 6) #define BTRFS_BALANCE_ARGS_STRIPES_RANGE (1ULL << 7) -#define BTRFS_BALANCE_ARGS_USAGE_RANGE (1ULL << 8) +#define BTRFS_BALANCE_ARGS_USAGE_RANGE (1ULL << 10) #define BTRFS_BALANCE_ARGS_MASK \ (BTRFS_BALANCE_ARGS_PROFILES | \ diff --git a/fs/direct-io.c b/fs/direct-io.c index cb5337d8c273..1c75a3a07f8f 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -1169,6 +1169,15 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, } } + /* Once we sampled i_size check for reads beyond EOF */ + dio->i_size = i_size_read(inode); + if (iov_iter_rw(iter) == READ && offset >= dio->i_size) { + if (dio->flags & DIO_LOCKING) + mutex_unlock(&inode->i_mutex); + kmem_cache_free(dio_cache, dio); + goto out; + } + /* * For file extending writes updating i_size before data writeouts * complete can expose uninitialized blocks in dumb filesystems. @@ -1222,7 +1231,6 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, sdio.next_block_for_io = -1; dio->iocb = iocb; - dio->i_size = i_size_read(inode); spin_lock_init(&dio->bio_lock); dio->refcount = 1; diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 87e9d796cf7d..3a37bd3f9637 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -421,7 +421,7 @@ static void lowcomms_write_space(struct sock *sk) if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) { con->sock->sk->sk_write_pending--; - clear_bit(SOCK_ASYNC_NOSPACE, &con->sock->flags); + clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags); } if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) @@ -1448,7 +1448,7 @@ static void send_to_sock(struct connection *con) msg_flags); if (ret == -EAGAIN || ret == 0) { if (ret == -EAGAIN && - test_bit(SOCK_ASYNC_NOSPACE, &con->sock->flags) && + test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) && !test_and_set_bit(CF_APP_LIMITED, &con->flags)) { /* Notify TCP that we're limited by the * application window size. diff --git a/fs/namei.c b/fs/namei.c index d84d7c7515fc..0c3974cd3ecd 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1996,7 +1996,6 @@ static const char *path_init(struct nameidata *nd, unsigned flags) nd->last_type = LAST_ROOT; /* if there are only slashes... 
*/ nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT; nd->depth = 0; - nd->total_link_count = 0; if (flags & LOOKUP_ROOT) { struct dentry *root = nd->root.dentry; struct inode *inode = root->d_inode; diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index 646cdac73488..beac58b0e09c 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -78,7 +78,8 @@ static __be32 *read_buf(struct xdr_stream *xdr, int nbytes) p = xdr_inline_decode(xdr, nbytes); if (unlikely(p == NULL)) - printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed!\n"); + printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed " + "or truncated request.\n"); return p; } @@ -889,6 +890,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r struct cb_compound_hdr_arg hdr_arg = { 0 }; struct cb_compound_hdr_res hdr_res = { NULL }; struct xdr_stream xdr_in, xdr_out; + struct xdr_buf *rq_arg = &rqstp->rq_arg; __be32 *p, status; struct cb_process_state cps = { .drc_status = 0, @@ -900,7 +902,8 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r dprintk("%s: start\n", __func__); - xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base); + rq_arg->len = rq_arg->head[0].iov_len + rq_arg->page_len; + xdr_init_decode(&xdr_in, rq_arg, rq_arg->head[0].iov_base); p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len); xdr_init_encode(&xdr_out, &rqstp->rq_res, p); diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 326d9e10d833..31b0a52223a7 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -618,7 +618,10 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, nfs_inc_stats(inode, NFSIOS_SETATTRTRUNC); nfs_vmtruncate(inode, attr->ia_size); } - nfs_update_inode(inode, fattr); + if (fattr->valid) + nfs_update_inode(inode, fattr); + else + NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR; spin_unlock(&inode->i_lock); } EXPORT_SYMBOL_GPL(nfs_setattr_update_inode); @@ -1824,7 +1827,11 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0) nfsi->attr_gencount = fattr->gencount; } - invalid &= ~NFS_INO_INVALID_ATTR; + + /* Don't declare attrcache up to date if there were no attrs! 
*/ + if (fattr->valid != 0) + invalid &= ~NFS_INO_INVALID_ATTR; + /* Don't invalidate the data if we were to blame */ if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index 3e92a3cde15d..6b1ce9825430 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -14,7 +14,7 @@ #include "pnfs.h" #include "internal.h" -#define NFSDBG_FACILITY NFSDBG_PNFS +#define NFSDBG_FACILITY NFSDBG_PROC static int nfs42_set_rw_stateid(nfs4_stateid *dst, struct file *file, fmode_t fmode) @@ -284,6 +284,7 @@ static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f, .dst_fh = NFS_FH(dst_inode), .src_offset = src_offset, .dst_offset = dst_offset, + .count = count, .dst_bitmask = server->cache_consistency_bitmask, }; struct nfs42_clone_res res = { diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 223bedda64ae..10410e8b5853 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -33,7 +33,7 @@ static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion) return ret; idr_preload(GFP_KERNEL); spin_lock(&nn->nfs_client_lock); - ret = idr_alloc(&nn->cb_ident_idr, clp, 0, 0, GFP_NOWAIT); + ret = idr_alloc(&nn->cb_ident_idr, clp, 1, 0, GFP_NOWAIT); if (ret >= 0) clp->cl_cb_ident = ret; spin_unlock(&nn->nfs_client_lock); diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index 4aa571956cd6..db9b5fea5b3e 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -7,6 +7,7 @@ #include <linux/file.h> #include <linux/falloc.h> #include <linux/nfs_fs.h> +#include <uapi/linux/btrfs.h> /* BTRFS_IOC_CLONE/BTRFS_IOC_CLONE_RANGE */ #include "delegation.h" #include "internal.h" #include "iostat.h" @@ -203,6 +204,7 @@ nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd, struct fd src_file; struct inode *src_inode; unsigned int bs = server->clone_blksize; + bool same_inode = false; int ret; /* dst file must be opened for writing */ @@ -221,10 +223,8 @@ nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd, src_inode = file_inode(src_file.file); - /* src and dst must be different files */ - ret = -EINVAL; if (src_inode == dst_inode) - goto out_fput; + same_inode = true; /* src file must be opened for reading */ if (!(src_file.file->f_mode & FMODE_READ)) @@ -249,8 +249,16 @@ nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd, goto out_fput; } + /* verify if ranges are overlapped within the same file */ + if (same_inode) { + if (dst_off + count > src_off && dst_off < src_off + count) + goto out_fput; + } + /* XXX: do we lock at all? what if server needs CB_RECALL_LAYOUT? 
*/ - if (dst_inode < src_inode) { + if (same_inode) { + mutex_lock(&src_inode->i_mutex); + } else if (dst_inode < src_inode) { mutex_lock_nested(&dst_inode->i_mutex, I_MUTEX_PARENT); mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_CHILD); } else { @@ -275,7 +283,9 @@ nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd, truncate_inode_pages_range(&dst_inode->i_data, dst_off, dst_off + count - 1); out_unlock: - if (dst_inode < src_inode) { + if (same_inode) { + mutex_unlock(&src_inode->i_mutex); + } else if (dst_inode < src_inode) { mutex_unlock(&src_inode->i_mutex); mutex_unlock(&dst_inode->i_mutex); } else { @@ -291,46 +301,31 @@ out_drop_write: static long nfs42_ioctl_clone_range(struct file *dst_file, void __user *argp) { - struct nfs_ioctl_clone_range_args args; + struct btrfs_ioctl_clone_range_args args; if (copy_from_user(&args, argp, sizeof(args))) return -EFAULT; - return nfs42_ioctl_clone(dst_file, args.src_fd, args.src_off, args.dst_off, args.count); -} -#else -static long nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd, - u64 src_off, u64 dst_off, u64 count) -{ - return -ENOTTY; -} - -static long nfs42_ioctl_clone_range(struct file *dst_file, void __user *argp) -{ - return -ENOTTY; + return nfs42_ioctl_clone(dst_file, args.src_fd, args.src_offset, + args.dest_offset, args.src_length); } -#endif /* CONFIG_NFS_V4_2 */ long nfs4_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; switch (cmd) { - case NFS_IOC_CLONE: + case BTRFS_IOC_CLONE: return nfs42_ioctl_clone(file, arg, 0, 0, 0); - case NFS_IOC_CLONE_RANGE: + case BTRFS_IOC_CLONE_RANGE: return nfs42_ioctl_clone_range(file, argp); } return -ENOTTY; } +#endif /* CONFIG_NFS_V4_2 */ const struct file_operations nfs4_file_operations = { -#ifdef CONFIG_NFS_V4_2 - .llseek = nfs4_file_llseek, -#else - .llseek = nfs_file_llseek, -#endif .read_iter = nfs_file_read, .write_iter = nfs_file_write, .mmap = nfs_file_mmap, @@ -342,14 +337,14 @@ const struct file_operations nfs4_file_operations = { .flock = nfs_flock, .splice_read = nfs_file_splice_read, .splice_write = iter_file_splice_write, -#ifdef CONFIG_NFS_V4_2 - .fallocate = nfs42_fallocate, -#endif /* CONFIG_NFS_V4_2 */ .check_flags = nfs_check_flags, .setlease = simple_nosetlease, -#ifdef CONFIG_COMPAT +#ifdef CONFIG_NFS_V4_2 + .llseek = nfs4_file_llseek, + .fallocate = nfs42_fallocate, .unlocked_ioctl = nfs4_ioctl, -#else .compat_ioctl = nfs4_ioctl, -#endif /* CONFIG_COMPAT */ +#else + .llseek = nfs_file_llseek, +#endif }; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 765a03559363..89818036f035 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -7866,7 +7866,7 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) spin_unlock(&inode->i_lock); goto out_restart; } - if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) + if (nfs4_async_handle_error(task, server, state, &lgp->timeout) == -EAGAIN) goto out_restart; out: dprintk("<-- %s\n", __func__); diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index dfed4f5c8fcc..4e4441216804 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -3615,6 +3615,7 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st status = 0; if (unlikely(!(bitmap[0] & FATTR4_WORD0_FS_LOCATIONS))) goto out; + bitmap[0] &= ~FATTR4_WORD0_FS_LOCATIONS; status = -EIO; /* Ignore borken servers that return unrequested attrs */ if (unlikely(res == NULL)) diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 
93496c059837..5a8ae2125b50 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -872,33 +872,38 @@ send_layoutget(struct pnfs_layout_hdr *lo, dprintk("--> %s\n", __func__); - lgp = kzalloc(sizeof(*lgp), gfp_flags); - if (lgp == NULL) - return NULL; + /* + * Synchronously retrieve layout information from server and + * store in lseg. If we race with a concurrent seqid morphing + * op, then re-send the LAYOUTGET. + */ + do { + lgp = kzalloc(sizeof(*lgp), gfp_flags); + if (lgp == NULL) + return NULL; + + i_size = i_size_read(ino); + + lgp->args.minlength = PAGE_CACHE_SIZE; + if (lgp->args.minlength > range->length) + lgp->args.minlength = range->length; + if (range->iomode == IOMODE_READ) { + if (range->offset >= i_size) + lgp->args.minlength = 0; + else if (i_size - range->offset < lgp->args.minlength) + lgp->args.minlength = i_size - range->offset; + } + lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE; + lgp->args.range = *range; + lgp->args.type = server->pnfs_curr_ld->id; + lgp->args.inode = ino; + lgp->args.ctx = get_nfs_open_context(ctx); + lgp->gfp_flags = gfp_flags; + lgp->cred = lo->plh_lc_cred; - i_size = i_size_read(ino); + lseg = nfs4_proc_layoutget(lgp, gfp_flags); + } while (lseg == ERR_PTR(-EAGAIN)); - lgp->args.minlength = PAGE_CACHE_SIZE; - if (lgp->args.minlength > range->length) - lgp->args.minlength = range->length; - if (range->iomode == IOMODE_READ) { - if (range->offset >= i_size) - lgp->args.minlength = 0; - else if (i_size - range->offset < lgp->args.minlength) - lgp->args.minlength = i_size - range->offset; - } - lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE; - lgp->args.range = *range; - lgp->args.type = server->pnfs_curr_ld->id; - lgp->args.inode = ino; - lgp->args.ctx = get_nfs_open_context(ctx); - lgp->gfp_flags = gfp_flags; - lgp->cred = lo->plh_lc_cred; - - /* Synchronously retrieve layout information from server and - * store in lseg. - */ - lseg = nfs4_proc_layoutget(lgp, gfp_flags); if (IS_ERR(lseg)) { switch (PTR_ERR(lseg)) { case -ENOMEM: @@ -1687,6 +1692,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp) /* existing state ID, make sure the sequence number matches. */ if (pnfs_layout_stateid_blocked(lo, &res->stateid)) { dprintk("%s forget reply due to sequence\n", __func__); + status = -EAGAIN; goto out_forget_reply; } pnfs_set_layout_stateid(lo, &res->stateid, false); diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index 871fcb67be97..0a8983492d91 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c @@ -195,8 +195,7 @@ int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat) static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir, struct dentry *dentry, struct path *lowerpath, - struct kstat *stat, struct iattr *attr, - const char *link) + struct kstat *stat, const char *link) { struct inode *wdir = workdir->d_inode; struct inode *udir = upperdir->d_inode; @@ -240,8 +239,6 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir, mutex_lock(&newdentry->d_inode->i_mutex); err = ovl_set_attr(newdentry, stat); - if (!err && attr) - err = notify_change(newdentry, attr, NULL); mutex_unlock(&newdentry->d_inode->i_mutex); if (err) goto out_cleanup; @@ -286,8 +283,7 @@ out_cleanup: * that point the file will have already been copied up anyway. 
*/ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, - struct path *lowerpath, struct kstat *stat, - struct iattr *attr) + struct path *lowerpath, struct kstat *stat) { struct dentry *workdir = ovl_workdir(dentry); int err; @@ -345,26 +341,19 @@ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, } upperdentry = ovl_dentry_upper(dentry); if (upperdentry) { - unlock_rename(workdir, upperdir); + /* Raced with another copy-up? Nothing to do, then... */ err = 0; - /* Raced with another copy-up? Do the setattr here */ - if (attr) { - mutex_lock(&upperdentry->d_inode->i_mutex); - err = notify_change(upperdentry, attr, NULL); - mutex_unlock(&upperdentry->d_inode->i_mutex); - } - goto out_put_cred; + goto out_unlock; } err = ovl_copy_up_locked(workdir, upperdir, dentry, lowerpath, - stat, attr, link); + stat, link); if (!err) { /* Restore timestamps on parent (best effort) */ ovl_set_timestamps(upperdir, &pstat); } out_unlock: unlock_rename(workdir, upperdir); -out_put_cred: revert_creds(old_cred); put_cred(override_cred); @@ -406,7 +395,7 @@ int ovl_copy_up(struct dentry *dentry) ovl_path_lower(next, &lowerpath); err = vfs_getattr(&lowerpath, &stat); if (!err) - err = ovl_copy_up_one(parent, next, &lowerpath, &stat, NULL); + err = ovl_copy_up_one(parent, next, &lowerpath, &stat); dput(parent); dput(next); diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index ec0c2a050043..4060ffde8722 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -12,8 +12,7 @@ #include <linux/xattr.h> #include "overlayfs.h" -static int ovl_copy_up_last(struct dentry *dentry, struct iattr *attr, - bool no_data) +static int ovl_copy_up_truncate(struct dentry *dentry) { int err; struct dentry *parent; @@ -30,10 +29,8 @@ static int ovl_copy_up_last(struct dentry *dentry, struct iattr *attr, if (err) goto out_dput_parent; - if (no_data) - stat.size = 0; - - err = ovl_copy_up_one(parent, dentry, &lowerpath, &stat, attr); + stat.size = 0; + err = ovl_copy_up_one(parent, dentry, &lowerpath, &stat); out_dput_parent: dput(parent); @@ -49,13 +46,13 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr) if (err) goto out; - upperdentry = ovl_dentry_upper(dentry); - if (upperdentry) { + err = ovl_copy_up(dentry); + if (!err) { + upperdentry = ovl_dentry_upper(dentry); + mutex_lock(&upperdentry->d_inode->i_mutex); err = notify_change(upperdentry, attr, NULL); mutex_unlock(&upperdentry->d_inode->i_mutex); - } else { - err = ovl_copy_up_last(dentry, attr, false); } ovl_drop_write(dentry); out: @@ -353,7 +350,7 @@ struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags) return ERR_PTR(err); if (file_flags & O_TRUNC) - err = ovl_copy_up_last(dentry, NULL, true); + err = ovl_copy_up_truncate(dentry); else err = ovl_copy_up(dentry); ovl_drop_write(dentry); diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index ea5a40b06e3a..e17154aeaae4 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h @@ -194,7 +194,6 @@ void ovl_cleanup(struct inode *dir, struct dentry *dentry); /* copy_up.c */ int ovl_copy_up(struct dentry *dentry); int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, - struct path *lowerpath, struct kstat *stat, - struct iattr *attr); + struct path *lowerpath, struct kstat *stat); int ovl_copy_xattr(struct dentry *old, struct dentry *new); int ovl_set_attr(struct dentry *upper, struct kstat *stat); diff --git a/fs/splice.c b/fs/splice.c index 801c21cd77fe..4cf700d50b40 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ 
-809,6 +809,13 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des */ static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd) { + /* + * Check for signal early to make process killable when there are + * always buffers available + */ + if (signal_pending(current)) + return -ERESTARTSYS; + while (!pipe->nrbufs) { if (!pipe->writers) return 0; @@ -884,6 +891,7 @@ ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd, splice_from_pipe_begin(sd); do { + cond_resched(); ret = splice_from_pipe_next(pipe, sd); if (ret > 0) ret = splice_from_pipe_feed(pipe, sd, actor); diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c index 590ad9206e3f..02fa1dcc5969 100644 --- a/fs/sysv/inode.c +++ b/fs/sysv/inode.c @@ -162,15 +162,8 @@ void sysv_set_inode(struct inode *inode, dev_t rdev) inode->i_fop = &sysv_dir_operations; inode->i_mapping->a_ops = &sysv_aops; } else if (S_ISLNK(inode->i_mode)) { - if (inode->i_blocks) { - inode->i_op = &sysv_symlink_inode_operations; - inode->i_mapping->a_ops = &sysv_aops; - } else { - inode->i_op = &simple_symlink_inode_operations; - inode->i_link = (char *)SYSV_I(inode)->i_data; - nd_terminate_link(inode->i_link, inode->i_size, - sizeof(SYSV_I(inode)->i_data) - 1); - } + inode->i_op = &sysv_symlink_inode_operations; + inode->i_mapping->a_ops = &sysv_aops; } else init_special_inode(inode, inode->i_mode, rdev); } diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 0b921ae06cd8..a8e01aaca087 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -309,6 +309,11 @@ struct drm_file { unsigned universal_planes:1; /* true if client understands atomic properties */ unsigned atomic:1; + /* + * This client is allowed to gain master privileges for @master. + * Protected by struct drm_device::master_mutex. 
+ */ + unsigned allowed_master:1; struct pid *pid; kuid_t uid; @@ -344,6 +349,8 @@ struct drm_file { struct list_head event_list; int event_space; + struct mutex event_read_lock; + struct drm_prime_file_private prime; }; @@ -910,6 +917,7 @@ extern int drm_open(struct inode *inode, struct file *filp); extern ssize_t drm_read(struct file *filp, char __user *buffer, size_t count, loff_t *offset); extern int drm_release(struct inode *inode, struct file *filp); +extern int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv); /* Mapping support (drm_vm.h) */ extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); @@ -947,6 +955,10 @@ extern void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe, struct drm_pending_vblank_event *e); extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc, struct drm_pending_vblank_event *e); +extern void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe, + struct drm_pending_vblank_event *e); +extern void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, + struct drm_pending_vblank_event *e); extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe); extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc); extern int drm_vblank_get(struct drm_device *dev, unsigned int pipe); @@ -1111,4 +1123,7 @@ static __inline__ bool drm_can_sleep(void) return true; } +/* helper for handling conditionals in various for_each macros */ +#define for_each_if(condition) if (!(condition)) {} else + #endif diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 4b74c97d297a..d8576ac55693 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -149,7 +149,7 @@ int __must_check drm_atomic_async_commit(struct drm_atomic_state *state); ((connector) = (state)->connectors[__i], \ (connector_state) = (state)->connector_states[__i], 1); \ (__i)++) \ - if (connector) + for_each_if (connector) #define for_each_crtc_in_state(state, crtc, crtc_state, __i) \ for ((__i) = 0; \ @@ -157,7 +157,7 @@ int __must_check drm_atomic_async_commit(struct drm_atomic_state *state); ((crtc) = (state)->crtcs[__i], \ (crtc_state) = (state)->crtc_states[__i], 1); \ (__i)++) \ - if (crtc_state) + for_each_if (crtc_state) #define for_each_plane_in_state(state, plane, plane_state, __i) \ for ((__i) = 0; \ @@ -165,7 +165,7 @@ int __must_check drm_atomic_async_commit(struct drm_atomic_state *state); ((plane) = (state)->planes[__i], \ (plane_state) = (state)->plane_states[__i], 1); \ (__i)++) \ - if (plane_state) + for_each_if (plane_state) static inline bool drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state) { diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index 8cba54a2a0a0..a286cce98720 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -62,6 +62,8 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev, void drm_atomic_helper_cleanup_planes(struct drm_device *dev, struct drm_atomic_state *old_state); void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state); +void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc, + bool atomic); void drm_atomic_helper_swap_state(struct drm_device *dev, struct drm_atomic_state *state); @@ -81,6 +83,12 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set); int __drm_atomic_helper_set_config(struct drm_mode_set *set, struct drm_atomic_state *state); +int drm_atomic_helper_disable_all(struct drm_device *dev, + struct drm_modeset_acquire_ctx 
*ctx); +struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev); +int drm_atomic_helper_resume(struct drm_device *dev, + struct drm_atomic_state *state); + int drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc, struct drm_property *property, uint64_t val); diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 3f0c6909dda1..4765df331002 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -85,7 +85,11 @@ static inline uint64_t I642U64(int64_t val) return (uint64_t)*((uint64_t *)&val); } -/* rotation property bits */ +/* + * Rotation property bits. DRM_ROTATE_<degrees> rotates the image by the + * specified amount in degrees in counter clockwise direction. DRM_REFLECT_X and + * DRM_REFLECT_Y reflects the image along the specified axis prior to rotation + */ #define DRM_ROTATE_MASK 0x0f #define DRM_ROTATE_0 0 #define DRM_ROTATE_90 1 @@ -992,7 +996,7 @@ struct drm_mode_set { struct drm_mode_config_funcs { struct drm_framebuffer *(*fb_create)(struct drm_device *dev, struct drm_file *file_priv, - struct drm_mode_fb_cmd2 *mode_cmd); + const struct drm_mode_fb_cmd2 *mode_cmd); void (*output_poll_changed)(struct drm_device *dev); int (*atomic_check)(struct drm_device *dev, @@ -1166,7 +1170,7 @@ struct drm_mode_config { */ #define drm_for_each_plane_mask(plane, dev, plane_mask) \ list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \ - if ((plane_mask) & (1 << drm_plane_index(plane))) + for_each_if ((plane_mask) & (1 << drm_plane_index(plane))) #define obj_to_crtc(x) container_of(x, struct drm_crtc, base) @@ -1543,7 +1547,7 @@ static inline struct drm_property *drm_property_find(struct drm_device *dev, /* Plane list iterator for legacy (overlay only) planes. */ #define drm_for_each_legacy_plane(plane, dev) \ list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \ - if (plane->type == DRM_PLANE_TYPE_OVERLAY) + for_each_if (plane->type == DRM_PLANE_TYPE_OVERLAY) #define drm_for_each_plane(plane, dev) \ list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index 3febb4b9fce9..e22ab29d2d00 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h @@ -197,7 +197,7 @@ extern int drm_helper_connector_dpms(struct drm_connector *connector, int mode); extern void drm_helper_move_panel_connectors_to_head(struct drm_device *); extern void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, - struct drm_mode_fb_cmd2 *mode_cmd); + const struct drm_mode_fb_cmd2 *mode_cmd); static inline void drm_crtc_helper_add(struct drm_crtc *crtc, const struct drm_crtc_helper_funcs *funcs) diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index bb9d0deca07c..1252108da0ef 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -455,16 +455,52 @@ # define DP_EDP_14 0x03 #define DP_EDP_GENERAL_CAP_1 0x701 +# define DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP (1 << 0) +# define DP_EDP_BACKLIGHT_PIN_ENABLE_CAP (1 << 1) +# define DP_EDP_BACKLIGHT_AUX_ENABLE_CAP (1 << 2) +# define DP_EDP_PANEL_SELF_TEST_PIN_ENABLE_CAP (1 << 3) +# define DP_EDP_PANEL_SELF_TEST_AUX_ENABLE_CAP (1 << 4) +# define DP_EDP_FRC_ENABLE_CAP (1 << 5) +# define DP_EDP_COLOR_ENGINE_CAP (1 << 6) +# define DP_EDP_SET_POWER_CAP (1 << 7) #define DP_EDP_BACKLIGHT_ADJUSTMENT_CAP 0x702 +# define DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP (1 << 0) +# define DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP (1 << 1) +# define 
DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT (1 << 2) +# define DP_EDP_BACKLIGHT_AUX_PWM_PRODUCT_CAP (1 << 3) +# define DP_EDP_BACKLIGHT_FREQ_PWM_PIN_PASSTHRU_CAP (1 << 4) +# define DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP (1 << 5) +# define DP_EDP_DYNAMIC_BACKLIGHT_CAP (1 << 6) +# define DP_EDP_VBLANK_BACKLIGHT_UPDATE_CAP (1 << 7) #define DP_EDP_GENERAL_CAP_2 0x703 +# define DP_EDP_OVERDRIVE_ENGINE_ENABLED (1 << 0) #define DP_EDP_GENERAL_CAP_3 0x704 /* eDP 1.4 */ +# define DP_EDP_X_REGION_CAP_MASK (0xf << 0) +# define DP_EDP_X_REGION_CAP_SHIFT 0 +# define DP_EDP_Y_REGION_CAP_MASK (0xf << 4) +# define DP_EDP_Y_REGION_CAP_SHIFT 4 #define DP_EDP_DISPLAY_CONTROL_REGISTER 0x720 +# define DP_EDP_BACKLIGHT_ENABLE (1 << 0) +# define DP_EDP_BLACK_VIDEO_ENABLE (1 << 1) +# define DP_EDP_FRC_ENABLE (1 << 2) +# define DP_EDP_COLOR_ENGINE_ENABLE (1 << 3) +# define DP_EDP_VBLANK_BACKLIGHT_UPDATE_ENABLE (1 << 7) #define DP_EDP_BACKLIGHT_MODE_SET_REGISTER 0x721 +# define DP_EDP_BACKLIGHT_CONTROL_MODE_MASK (3 << 0) +# define DP_EDP_BACKLIGHT_CONTROL_MODE_PWM (0 << 0) +# define DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET (1 << 0) +# define DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD (2 << 0) +# define DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT (3 << 0) +# define DP_EDP_BACKLIGHT_FREQ_PWM_PIN_PASSTHRU_ENABLE (1 << 2) +# define DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE (1 << 3) +# define DP_EDP_DYNAMIC_BACKLIGHT_ENABLE (1 << 4) +# define DP_EDP_REGIONAL_BACKLIGHT_ENABLE (1 << 5) +# define DP_EDP_UPDATE_REGION_BRIGHTNESS (1 << 6) /* eDP 1.4 */ #define DP_EDP_BACKLIGHT_BRIGHTNESS_MSB 0x722 #define DP_EDP_BACKLIGHT_BRIGHTNESS_LSB 0x723 diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h index c54cf3d4a03f..be62bd321e75 100644 --- a/include/drm/drm_fb_cma_helper.h +++ b/include/drm/drm_fb_cma_helper.h @@ -18,7 +18,7 @@ void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma); void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma); struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev, - struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd); + struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd); struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb, unsigned int plane); diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 15e7f007380f..0b3e11ab8757 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -35,76 +35,129 @@ */ /** - * This structure defines the drm_mm memory object, which will be used by the - * DRM for its buffer objects. + * struct drm_gem_object - GEM buffer object + * + * This structure defines the generic parts for GEM buffer objects, which are + * mostly around handling mmap and userspace handles. + * + * Buffer objects are often abbreviated to BO. */ struct drm_gem_object { - /** Reference count of this object */ + /** + * @refcount: + * + * Reference count of this object + * + * Please use drm_gem_object_reference() to acquire and + * drm_gem_object_unreference() or drm_gem_object_unreference_unlocked() + * to release a reference to a GEM buffer object. + */ struct kref refcount; /** - * handle_count - gem file_priv handle count of this object + * @handle_count: + * + * This is the GEM file_priv handle count of this object. * * Each handle also holds a reference. Note that when the handle_count * drops to 0 any global names (e.g. the id in the flink namespace) will * be cleared. * * Protected by dev->object_name_lock. 
- * */ + */ unsigned handle_count; - /** Related drm device */ + /** + * @dev: DRM dev this object belongs to. + */ struct drm_device *dev; - /** File representing the shmem storage */ + /** + * @filp: + * + * SHMEM file node used as backing storage for swappable buffer objects. + * GEM also supports driver private objects with driver-specific backing + * storage (contiguous CMA memory, special reserved blocks). In this + * case @filp is NULL. + */ struct file *filp; - /* Mapping info for this object */ + /** + * @vma_node: + * + * Mapping info for this object to support mmap. Drivers are supposed to + * allocate the mmap offset using drm_gem_create_mmap_offset(). The + * offset itself can be retrieved using drm_vma_node_offset_addr(). + * + * Memory mapping itself is handled by drm_gem_mmap(), which also checks + * that userspace is allowed to access the object. + */ struct drm_vma_offset_node vma_node; /** + * @size: + * * Size of the object, in bytes. Immutable over the object's * lifetime. */ size_t size; /** + * @name: + * * Global name for this object, starts at 1. 0 means unnamed. - * Access is covered by the object_name_lock in the related drm_device + * Access is covered by dev->object_name_lock. This is used by the GEM_FLINK + * and GEM_OPEN ioctls. */ int name; /** - * Memory domains. These monitor which caches contain read/write data + * @read_domains: + * + * Read memory domains. These monitor which caches contain read/write data * related to the object. When transitioning from one set of domains * to another, the driver is called to ensure that caches are suitably - * flushed and invalidated + * flushed and invalidated. */ uint32_t read_domains; + + /** + * @write_domain: Corresponding unique write memory domain. + */ uint32_t write_domain; /** + * @pending_read_domains: + * * While validating an exec operation, the * new read/write domain values are computed here. * They will be transferred to the above values * at the point that any cache flushing occurs */ uint32_t pending_read_domains; + + /** + * @pending_write_domain: Write domain similar to @pending_read_domains. + */ uint32_t pending_write_domain; /** - * dma_buf - dma buf associated with this GEM object + * @dma_buf: + * + * dma-buf associated with this GEM object. * * Pointer to the dma-buf associated with this gem object (either * through importing or exporting). We break the resulting reference * loop when the last gem handle for this object is released. * - * Protected by obj->object_name_lock + * Protected by obj->object_name_lock. */ struct dma_buf *dma_buf; /** - * import_attach - dma buf attachment backing this object + * @import_attach: + * + * dma-buf attachment backing this object. * * Any foreign dma_buf imported as a gem object has this set to the * attachment point for the device. This is invariant over the lifetime @@ -133,12 +186,30 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, struct vm_area_struct *vma); int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); +/** + * drm_gem_object_reference - acquire a GEM BO reference + * @obj: GEM buffer object + * + * This acquires additional reference to @obj. It is illegal to call this + * without already holding a reference. No locks required. + */ static inline void drm_gem_object_reference(struct drm_gem_object *obj) { kref_get(&obj->refcount); } +/** + * drm_gem_object_unreference - release a GEM BO reference + * @obj: GEM buffer object + * + * This releases a reference to @obj. 
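+ * A minimal locking sketch (ours, not part of the patch):
+ *
+ *	mutex_lock(&dev->struct_mutex);
+ *	drm_gem_object_unreference(obj);
+ *	mutex_unlock(&dev->struct_mutex);
+ *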
Callers must hold the dev->struct_mutex + * lock when calling this function, even when the driver doesn't use + * dev->struct_mutex for anything. + * + * For drivers not encumbered with legacy locking use + * drm_gem_object_unreference_unlocked() instead. + */ static inline void drm_gem_object_unreference(struct drm_gem_object *obj) { @@ -149,6 +220,13 @@ drm_gem_object_unreference(struct drm_gem_object *obj) } } +/** + * drm_gem_object_unreference_unlocked - release a GEM BO reference + * @obj: GEM buffer object + * + * This releases a reference to @obj. Callers must not hold the + * dev->struct_mutex lock when calling this function. + */ static inline void drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) { diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index 0de6290df4da..fc65118e5077 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -148,8 +148,7 @@ static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node) static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node) { - return list_entry(hole_node->node_list.next, - struct drm_mm_node, node_list)->start; + return list_next_entry(hole_node, node_list)->start; } /** @@ -180,6 +179,14 @@ static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node) &(mm)->head_node.node_list, \ node_list) +#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \ + for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \ + &entry->hole_stack != &(mm)->hole_stack ? \ + hole_start = drm_mm_hole_node_start(entry), \ + hole_end = drm_mm_hole_node_end(entry), \ + 1 : 0; \ + entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack)) + /** * drm_mm_for_each_hole - iterator to walk over all holes * @entry: drm_mm_node used internally to track progress @@ -200,20 +207,7 @@ static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node) * going backwards. */ #define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \ - for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \ - &entry->hole_stack != &(mm)->hole_stack ? \ - hole_start = drm_mm_hole_node_start(entry), \ - hole_end = drm_mm_hole_node_end(entry), \ - 1 : 0; \ - entry = list_entry(entry->hole_stack.next, struct drm_mm_node, hole_stack)) - -#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \ - for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \ - &entry->hole_stack != &(mm)->hole_stack ? \ - hole_start = drm_mm_hole_node_start(entry), \ - hole_end = drm_mm_hole_node_end(entry), \ - 1 : 0; \ - entry = list_entry((backwards) ? 
entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack)) + __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, 0) /* * Basic range manager support (drm_mm.c) diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h index 08a8cac9e555..f9115aee43f4 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -222,6 +222,8 @@ struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, const struct drm_display_mode *mode); bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2); +bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, + const struct drm_display_mode *mode2); bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2); diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h index 94938d89347c..c5576fbcb909 100644 --- a/include/drm/drm_modeset_lock.h +++ b/include/drm/drm_modeset_lock.h @@ -138,7 +138,7 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev); struct drm_modeset_acquire_ctx * drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc); -int drm_modeset_lock_all_crtcs(struct drm_device *dev, - struct drm_modeset_acquire_ctx *ctx); +int drm_modeset_lock_all_ctx(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx); #endif /* DRM_MODESET_LOCK_H_ */ diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h index 26bb55e9e8b6..83bb156d4356 100644 --- a/include/drm/drm_rect.h +++ b/include/drm/drm_rect.h @@ -162,7 +162,8 @@ int drm_rect_calc_hscale_relaxed(struct drm_rect *src, int drm_rect_calc_vscale_relaxed(struct drm_rect *src, struct drm_rect *dst, int min_vscale, int max_vscale); -void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point); +void drm_rect_debug_print(const char *prefix, + const struct drm_rect *r, bool fixed_point); void drm_rect_rotate(struct drm_rect *r, int width, int height, unsigned int rotation); diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h index 30d89e0da2c6..b46fa0ef3005 100644 --- a/include/drm/i915_component.h +++ b/include/drm/i915_component.h @@ -31,47 +31,94 @@ #define MAX_PORTS 5 /** - * struct i915_audio_component_ops - callbacks defined in gfx driver - * @owner: the module owner - * @get_power: get the POWER_DOMAIN_AUDIO power well - * @put_power: put the POWER_DOMAIN_AUDIO power well - * @codec_wake_override: Enable/Disable generating the codec wake signal - * @get_cdclk_freq: get the Core Display Clock in KHz - * @sync_audio_rate: set n/cts based on the sample rate + * struct i915_audio_component_ops - Ops implemented by i915 driver, called by hda driver */ struct i915_audio_component_ops { + /** + * @owner: i915 module + */ struct module *owner; + /** + * @get_power: get the POWER_DOMAIN_AUDIO power well + * + * Request the power well to be turned on. + */ void (*get_power)(struct device *); + /** + * @put_power: put the POWER_DOMAIN_AUDIO power well + * + * Allow the power well to be turned off. + */ void (*put_power)(struct device *); + /** + * @codec_wake_override: Enable/disable codec wake signal + */ void (*codec_wake_override)(struct device *, bool enable); + /** + * @get_cdclk_freq: Get the Core Display Clock in kHz + */ int (*get_cdclk_freq)(struct device *); + /** + * @sync_audio_rate: set n/cts based on the sample rate + * + * Called from audio driver. 
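+ * (Illustrative call, where acomp is a hypothetical
+ * struct i915_audio_component pointer:
+ *	acomp->ops->sync_audio_rate(acomp->dev, port, 48000);
+ * programs N/CTS for a 48 kHz stream on that port.)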
After audio driver sets the + * sample rate, it will call this function to set n/cts + */ int (*sync_audio_rate)(struct device *, int port, int rate); + /** + * @get_eld: fill the audio state and ELD bytes for the given port + * + * Called from audio driver to get the HDMI/DP audio state of the given + * digital port, and also fetch ELD bytes to the given pointer. + * + * It returns the byte size of the original ELD (not the actually + * copied size), zero for an invalid ELD, or a negative error code. + * + * Note that the returned size may be over @max_bytes. Then it + * implies that only a part of ELD has been copied to the buffer. + */ + int (*get_eld)(struct device *, int port, bool *enabled, + unsigned char *buf, int max_bytes); }; +/** + * struct i915_audio_component_audio_ops - Ops implemented by hda driver, called by i915 driver + */ struct i915_audio_component_audio_ops { + /** + * @audio_ptr: Pointer to be used in call to pin_eld_notify + */ void *audio_ptr; /** - * Call from i915 driver, notifying the HDA driver that - * pin sense and/or ELD information has changed. - * @audio_ptr: HDA driver object - * @port: Which port has changed (PORTA / PORTB / PORTC etc) + * @pin_eld_notify: Notify the HDA driver that pin sense and/or ELD information has changed + * + * Called when the i915 driver has set up audio pipeline or has just + * begun to tear it down. This allows the HDA driver to update its + * status accordingly (even when the HDA controller is in power save + * mode). */ void (*pin_eld_notify)(void *audio_ptr, int port); }; /** - * struct i915_audio_component - used for audio video interaction - * @dev: the device from gfx driver - * @aud_sample_rate: the array of audio sample rate per port - * @ops: callback for audio driver calling - * @audio_ops: Call from i915 driver + * struct i915_audio_component - Used for direct communication between i915 and hda drivers */ struct i915_audio_component { + /** + * @dev: i915 device, used as parameter for ops + */ struct device *dev; + /** + * @aud_sample_rate: the array of audio sample rate per port + */ int aud_sample_rate[MAX_PORTS]; - + /** + * @ops: Ops implemented by i915 driver, called by hda driver + */ const struct i915_audio_component_ops *ops; - + /** + * @audio_ops: Ops implemented by hda driver, called by i915 driver + */ const struct i915_audio_component_audio_ops *audio_ops; }; diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 17c445612e01..f1a113e35f98 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -291,4 +291,40 @@ INTEL_VGA_DEVICE(0x1A84, info), \ INTEL_VGA_DEVICE(0x5A84, info) +#define INTEL_KBL_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \ + INTEL_VGA_DEVICE(0x5915, info), /* ULX GT1.5 */ \ + INTEL_VGA_DEVICE(0x5917, info), /* DT GT1.5 */ \ + INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \ + INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \ + INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \ + INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \ + INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */ + +#define INTEL_KBL_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \ + INTEL_VGA_DEVICE(0x5921, info), /* ULT GT2F */ \ + INTEL_VGA_DEVICE(0x591E, info), /* ULX GT2 */ \ + INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \ + INTEL_VGA_DEVICE(0x591B, info), /* Halo GT2 */ \ + INTEL_VGA_DEVICE(0x591A, info), /* SRV GT2 */ \ + INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */ + +#define INTEL_KBL_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x5926, info), /* ULT GT3 */ \ + 
INTEL_VGA_DEVICE(0x592B, info), /* Halo GT3 */ \ + INTEL_VGA_DEVICE(0x592A, info) /* SRV GT3 */ + +#define INTEL_KBL_GT4_IDS(info) \ + INTEL_VGA_DEVICE(0x5932, info), /* DT GT4 */ \ + INTEL_VGA_DEVICE(0x593B, info), /* Halo GT4 */ \ + INTEL_VGA_DEVICE(0x593A, info), /* SRV GT4 */ \ + INTEL_VGA_DEVICE(0x593D, info) /* WKS GT4 */ + +#define INTEL_KBL_IDS(info) \ + INTEL_KBL_GT1_IDS(info), \ + INTEL_KBL_GT2_IDS(info), \ + INTEL_KBL_GT3_IDS(info), \ + INTEL_KBL_GT4_IDS(info) + #endif /* _I915_PCIIDS_H */ diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 9c747cb14ad8..d2f41477f8ae 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -342,10 +342,10 @@ int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid, struct irq_phys_map *map, bool level); void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg); int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu); -int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu); struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, int virt_irq, int irq); int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map); +bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map); #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) #define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus)) diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 054833939995..1991aea2ec4c 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -870,8 +870,8 @@ static inline int acpi_dev_get_property(struct acpi_device *adev, } static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode, - const char *name, const char *cells_name, - size_t index, struct acpi_reference_args *args) + const char *name, size_t index, + struct acpi_reference_args *args) { return -ENXIO; } diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index c0d2b7927c1f..0169ba2e2e64 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -254,6 +254,7 @@ struct queue_limits { unsigned long virt_boundary_mask; unsigned int max_hw_sectors; + unsigned int max_dev_sectors; unsigned int chunk_sectors; unsigned int max_sectors; unsigned int max_segment_size; @@ -773,7 +774,6 @@ extern void blk_rq_set_block_pc(struct request *); extern void blk_requeue_request(struct request_queue *, struct request *); extern void blk_add_request_payload(struct request *rq, struct page *page, unsigned int len); -extern int blk_rq_check_limits(struct request_queue *q, struct request *rq); extern int blk_lld_busy(struct request_queue *q); extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, struct bio_set *bs, gfp_t gfp_mask, @@ -960,7 +960,6 @@ extern struct request_queue *blk_init_allocated_queue(struct request_queue *, extern void blk_cleanup_queue(struct request_queue *); extern void blk_queue_make_request(struct request_queue *, make_request_fn *); extern void blk_queue_bounce_limit(struct request_queue *, u64); -extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int); extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int); extern void blk_queue_max_segments(struct request_queue *, unsigned short); diff --git a/include/linux/bpf.h b/include/linux/bpf.h index de464e6683b6..83d1926c61e4 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -40,6 +40,7 @@ struct bpf_map { struct user_struct *user; const struct bpf_map_ops *ops; struct work_struct work; + atomic_t 
usercnt; }; struct bpf_map_type_list { @@ -167,8 +168,10 @@ struct bpf_prog *bpf_prog_get(u32 ufd); void bpf_prog_put(struct bpf_prog *prog); void bpf_prog_put_rcu(struct bpf_prog *prog); -struct bpf_map *bpf_map_get(u32 ufd); +struct bpf_map *bpf_map_get_with_uref(u32 ufd); struct bpf_map *__bpf_map_get(struct fd f); +void bpf_map_inc(struct bpf_map *map, bool uref); +void bpf_map_put_with_uref(struct bpf_map *map); void bpf_map_put(struct bpf_map *map); extern int sysctl_unprivileged_bpf_disabled; diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index ef4c5b1a860f..177c7680c1a8 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -77,6 +77,7 @@ struct cpufreq_policy { unsigned int suspend_freq; /* freq to set during suspend */ unsigned int policy; /* see above */ + unsigned int last_policy; /* policy before unplug */ struct cpufreq_governor *governor; /* see below */ void *governor_data; bool governor_enabled; /* governor start/stop flag */ diff --git a/include/linux/dns_resolver.h b/include/linux/dns_resolver.h index cc92268af89a..6ac3cad9aef1 100644 --- a/include/linux/dns_resolver.h +++ b/include/linux/dns_resolver.h @@ -27,7 +27,7 @@ #ifdef __KERNEL__ extern int dns_query(const char *type, const char *name, size_t namelen, - const char *options, char **_result, time_t *_expiry); + const char *options, char **_result, time64_t *_expiry); #endif /* KERNEL */ diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 0ef2a97ccdb5..402753bccafa 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -227,7 +227,7 @@ struct ipv6_pinfo { struct ipv6_ac_socklist *ipv6_ac_list; struct ipv6_fl_socklist __rcu *ipv6_fl_list; - struct ipv6_txoptions *opt; + struct ipv6_txoptions __rcu *opt; struct sk_buff *pktoptions; struct sk_buff *rxpmtu; struct inet6_cork cork; diff --git a/include/linux/kref.h b/include/linux/kref.h index 484604d184be..e15828fd71f1 100644 --- a/include/linux/kref.h +++ b/include/linux/kref.h @@ -19,7 +19,6 @@ #include <linux/atomic.h> #include <linux/kernel.h> #include <linux/mutex.h> -#include <linux/spinlock.h> struct kref { atomic_t refcount; @@ -99,38 +98,6 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref) return kref_sub(kref, 1, release); } -/** - * kref_put_spinlock_irqsave - decrement refcount for object. - * @kref: object. - * @release: pointer to the function that will clean up the object when the - * last reference to the object is released. - * This pointer is required, and it is not acceptable to pass kfree - * in as this function. - * @lock: lock to take in release case - * - * Behaves identical to kref_put with one exception. If the reference count - * drops to zero, the lock will be taken atomically wrt dropping the reference - * count. The release function has to call spin_unlock() without _irqrestore. 
- */ -static inline int kref_put_spinlock_irqsave(struct kref *kref, - void (*release)(struct kref *kref), - spinlock_t *lock) -{ - unsigned long flags; - - WARN_ON(release == NULL); - if (atomic_add_unless(&kref->refcount, -1, 1)) - return 0; - spin_lock_irqsave(lock, flags); - if (atomic_dec_and_test(&kref->refcount)) { - release(kref); - local_irq_restore(flags); - return 1; - } - spin_unlock_irqrestore(lock, flags); - return 0; -} - static inline int kref_put_mutex(struct kref *kref, void (*release)(struct kref *kref), struct mutex *lock) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 5706a2108f0a..c923350ca20a 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -460,6 +460,17 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \ idx++) +static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) +{ + struct kvm_vcpu *vcpu; + int i; + + kvm_for_each_vcpu(i, vcpu, kvm) + if (vcpu->vcpu_id == id) + return vcpu; + return NULL; +} + #define kvm_for_each_memslot(memslot, slots) \ for (memslot = &slots->memslots[0]; \ memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\ diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 69c9057e1ab8..c6916aec43b6 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -58,7 +58,6 @@ enum { struct nvm_id_group { u8 mtype; u8 fmtype; - u16 res16; u8 num_ch; u8 num_lun; u8 num_pln; @@ -74,9 +73,9 @@ struct nvm_id_group { u32 tbet; u32 tbem; u32 mpos; + u32 mccap; u16 cpar; - u8 res[913]; -} __packed; +}; struct nvm_addr_format { u8 ch_offset; @@ -91,19 +90,15 @@ struct nvm_addr_format { u8 pg_len; u8 sect_offset; u8 sect_len; - u8 res[4]; }; struct nvm_id { u8 ver_id; u8 vmnt; u8 cgrps; - u8 res[5]; u32 cap; u32 dom; struct nvm_addr_format ppaf; - u8 ppat; - u8 resv[224]; struct nvm_id_group groups[4]; } __packed; @@ -123,39 +118,28 @@ struct nvm_tgt_instance { #define NVM_VERSION_MINOR 0 #define NVM_VERSION_PATCH 0 -#define NVM_SEC_BITS (8) -#define NVM_PL_BITS (6) -#define NVM_PG_BITS (16) #define NVM_BLK_BITS (16) -#define NVM_LUN_BITS (10) +#define NVM_PG_BITS (16) +#define NVM_SEC_BITS (8) +#define NVM_PL_BITS (8) +#define NVM_LUN_BITS (8) #define NVM_CH_BITS (8) struct ppa_addr { + /* Generic structure for all addresses */ union { - /* Channel-based PPA format in nand 4x2x2x2x8x10 */ - struct { - u64 ch : 4; - u64 sec : 2; /* 4 sectors per page */ - u64 pl : 2; /* 4 planes per LUN */ - u64 lun : 2; /* 4 LUNs per channel */ - u64 pg : 8; /* 256 pages per block */ - u64 blk : 10;/* 1024 blocks per plane */ - u64 resved : 36; - } chnl; - - /* Generic structure for all addresses */ struct { + u64 blk : NVM_BLK_BITS; + u64 pg : NVM_PG_BITS; u64 sec : NVM_SEC_BITS; u64 pl : NVM_PL_BITS; - u64 pg : NVM_PG_BITS; - u64 blk : NVM_BLK_BITS; u64 lun : NVM_LUN_BITS; u64 ch : NVM_CH_BITS; } g; u64 ppa; }; -} __packed; +}; struct nvm_rq { struct nvm_tgt_instance *ins; @@ -191,11 +175,11 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata) struct nvm_block; typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); -typedef int (nvm_bb_update_fn)(u32, void *, unsigned int, void *); +typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *); typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *); typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32, nvm_l2p_update_fn *, void *); -typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, int, unsigned int, +typedef int 
(nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int, nvm_bb_update_fn *, void *); typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int); typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *); @@ -210,7 +194,7 @@ struct nvm_dev_ops { nvm_id_fn *identity; nvm_get_l2p_tbl_fn *get_l2p_tbl; nvm_op_bb_tbl_fn *get_bb_tbl; - nvm_op_set_bb_fn *set_bb; + nvm_op_set_bb_fn *set_bb_tbl; nvm_submit_io_fn *submit_io; nvm_erase_blk_fn *erase_block; @@ -220,7 +204,7 @@ struct nvm_dev_ops { nvm_dev_dma_alloc_fn *dev_dma_alloc; nvm_dev_dma_free_fn *dev_dma_free; - uint8_t max_phys_sect; + unsigned int max_phys_sect; }; struct nvm_lun { @@ -229,7 +213,9 @@ struct nvm_lun { int lun_id; int chnl_id; + unsigned int nr_inuse_blocks; /* Number of used blocks */ unsigned int nr_free_blocks; /* Number of unused blocks */ + unsigned int nr_bad_blocks; /* Number of bad blocks */ struct nvm_block *blocks; spinlock_t lock; @@ -263,8 +249,7 @@ struct nvm_dev { int blks_per_lun; int sec_size; int oob_size; - int addr_mode; - struct nvm_addr_format addr_format; + struct nvm_addr_format ppaf; /* Calculated/Cached values. These do not reflect the actual usable * blocks at run-time. @@ -290,118 +275,45 @@ struct nvm_dev { char name[DISK_NAME_LEN]; }; -/* fallback conversion */ -static struct ppa_addr __generic_to_linear_addr(struct nvm_dev *dev, - struct ppa_addr r) -{ - struct ppa_addr l; - - l.ppa = r.g.sec + - r.g.pg * dev->sec_per_pg + - r.g.blk * (dev->pgs_per_blk * - dev->sec_per_pg) + - r.g.lun * (dev->blks_per_lun * - dev->pgs_per_blk * - dev->sec_per_pg) + - r.g.ch * (dev->blks_per_lun * - dev->pgs_per_blk * - dev->luns_per_chnl * - dev->sec_per_pg); - - return l; -} - -/* fallback conversion */ -static struct ppa_addr __linear_to_generic_addr(struct nvm_dev *dev, - struct ppa_addr r) +static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev, + struct ppa_addr r) { struct ppa_addr l; - int secs, pgs, blks, luns; - sector_t ppa = r.ppa; - l.ppa = 0; - - div_u64_rem(ppa, dev->sec_per_pg, &secs); - l.g.sec = secs; - - sector_div(ppa, dev->sec_per_pg); - div_u64_rem(ppa, dev->sec_per_blk, &pgs); - l.g.pg = pgs; - - sector_div(ppa, dev->pgs_per_blk); - div_u64_rem(ppa, dev->blks_per_lun, &blks); - l.g.blk = blks; - - sector_div(ppa, dev->blks_per_lun); - div_u64_rem(ppa, dev->luns_per_chnl, &luns); - l.g.lun = luns; - - sector_div(ppa, dev->luns_per_chnl); - l.g.ch = ppa; + l.ppa = ((u64)r.g.blk) << dev->ppaf.blk_offset; + l.ppa |= ((u64)r.g.pg) << dev->ppaf.pg_offset; + l.ppa |= ((u64)r.g.sec) << dev->ppaf.sect_offset; + l.ppa |= ((u64)r.g.pl) << dev->ppaf.pln_offset; + l.ppa |= ((u64)r.g.lun) << dev->ppaf.lun_offset; + l.ppa |= ((u64)r.g.ch) << dev->ppaf.ch_offset; return l; } -static struct ppa_addr __generic_to_chnl_addr(struct ppa_addr r) +static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev, + struct ppa_addr r) { struct ppa_addr l; - l.ppa = 0; - - l.chnl.sec = r.g.sec; - l.chnl.pl = r.g.pl; - l.chnl.pg = r.g.pg; - l.chnl.blk = r.g.blk; - l.chnl.lun = r.g.lun; - l.chnl.ch = r.g.ch; - - return l; -} - -static struct ppa_addr __chnl_to_generic_addr(struct ppa_addr r) -{ - struct ppa_addr l; - - l.ppa = 0; - - l.g.sec = r.chnl.sec; - l.g.pl = r.chnl.pl; - l.g.pg = r.chnl.pg; - l.g.blk = r.chnl.blk; - l.g.lun = r.chnl.lun; - l.g.ch = r.chnl.ch; + /* + * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc. 
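+ *
+ * For instance, with ppaf.blk_offset == 0 and ppaf.blk_len == 16
+ * (NVM_BLK_BITS above), the block index comes back as
+ * (r.ppa >> 0) & 0xffff; every other field follows the same
+ * shift-and-mask pattern using the device-reported nvm_addr_format.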
+ */ + l.g.blk = (r.ppa >> dev->ppaf.blk_offset) & + (((1 << dev->ppaf.blk_len) - 1)); + l.g.pg |= (r.ppa >> dev->ppaf.pg_offset) & + (((1 << dev->ppaf.pg_len) - 1)); + l.g.sec |= (r.ppa >> dev->ppaf.sect_offset) & + (((1 << dev->ppaf.sect_len) - 1)); + l.g.pl |= (r.ppa >> dev->ppaf.pln_offset) & + (((1 << dev->ppaf.pln_len) - 1)); + l.g.lun |= (r.ppa >> dev->ppaf.lun_offset) & + (((1 << dev->ppaf.lun_len) - 1)); + l.g.ch |= (r.ppa >> dev->ppaf.ch_offset) & + (((1 << dev->ppaf.ch_len) - 1)); return l; } -static inline struct ppa_addr addr_to_generic_mode(struct nvm_dev *dev, - struct ppa_addr gppa) -{ - switch (dev->addr_mode) { - case NVM_ADDRMODE_LINEAR: - return __linear_to_generic_addr(dev, gppa); - case NVM_ADDRMODE_CHANNEL: - return __chnl_to_generic_addr(gppa); - default: - BUG(); - } - return gppa; -} - -static inline struct ppa_addr generic_to_addr_mode(struct nvm_dev *dev, - struct ppa_addr gppa) -{ - switch (dev->addr_mode) { - case NVM_ADDRMODE_LINEAR: - return __generic_to_linear_addr(dev, gppa); - case NVM_ADDRMODE_CHANNEL: - return __generic_to_chnl_addr(gppa); - default: - BUG(); - } - return gppa; -} - static inline int ppa_empty(struct ppa_addr ppa_addr) { return (ppa_addr.ppa == ADDR_EMPTY); @@ -468,7 +380,7 @@ typedef int (nvmm_end_io_fn)(struct nvm_rq *, int); typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, unsigned long); typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int); -typedef void (nvmm_free_blocks_print_fn)(struct nvm_dev *); +typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *); struct nvmm_type { const char *name; @@ -492,7 +404,7 @@ struct nvmm_type { nvmm_get_lun_fn *get_lun; /* Statistics */ - nvmm_free_blocks_print_fn *free_blocks_print; + nvmm_lun_info_print_fn *lun_info_print; struct list_head list; }; diff --git a/include/linux/net.h b/include/linux/net.h index 70ac5e28e6b7..0b4ac7da583a 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -34,8 +34,12 @@ struct inode; struct file; struct net; -#define SOCK_ASYNC_NOSPACE 0 -#define SOCK_ASYNC_WAITDATA 1 +/* Historically, SOCKWQ_ASYNC_NOSPACE & SOCKWQ_ASYNC_WAITDATA were located + * in sock->flags, but moved into sk->sk_wq->flags to be RCU protected. + * Eventually all flags will be in sk->sk_wq_flags. 
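+ *
+ * These bits are meant to be manipulated through the sk_set_bit()
+ * and sk_clear_bit() helpers added to include/net/sock.h in this
+ * series, e.g. sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk), rather than
+ * by touching sk->sk_wq->flags directly.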
+ */ +#define SOCKWQ_ASYNC_NOSPACE 0 +#define SOCKWQ_ASYNC_WAITDATA 1 #define SOCK_NOSPACE 2 #define SOCK_PASSCRED 3 #define SOCK_PASSSEC 4 @@ -89,6 +93,7 @@ struct socket_wq { /* Note: wait MUST be first field of socket_wq */ wait_queue_head_t wait; struct fasync_struct *fasync_list; + unsigned long flags; /* %SOCKWQ_ASYNC_NOSPACE, etc */ struct rcu_head rcu; } ____cacheline_aligned_in_smp; @@ -96,7 +101,7 @@ struct socket_wq { * struct socket - general BSD socket * @state: socket state (%SS_CONNECTED, etc) * @type: socket type (%SOCK_STREAM, etc) - * @flags: socket flags (%SOCK_ASYNC_NOSPACE, etc) + * @flags: socket flags (%SOCK_NOSPACE, etc) * @ops: protocol specific socket operations * @file: File back pointer for gc * @sk: internal networking protocol agnostic socket representation @@ -202,7 +207,7 @@ enum { SOCK_WAKE_URG, }; -int sock_wake_async(struct socket *sk, int how, int band); +int sock_wake_async(struct socket_wq *sk_wq, int how, int band); int sock_register(const struct net_proto_family *fam); void sock_unregister(int family); int __sock_create(struct net *net, int family, int type, int proto, diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 67bfac1abfc1..3b5d134e945a 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1398,7 +1398,8 @@ enum netdev_priv_flags { * @dma: DMA channel * @mtu: Interface MTU value * @type: Interface hardware type - * @hard_header_len: Hardware header length + * @hard_header_len: Hardware header length, which means that this is the + * minimum size of a packet. * * @needed_headroom: Extra headroom the hardware may need, but not in all * cases can this be guaranteed diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 570d630f98ae..11bbae44f4cb 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -251,6 +251,7 @@ struct nfs4_layoutget { struct nfs4_layoutget_res res; struct rpc_cred *cred; gfp_t gfp_flags; + long timeout; }; struct nfs4_getdeviceinfo_args { diff --git a/include/linux/pci.h b/include/linux/pci.h index e828e7b4afec..6ae25aae88fd 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -412,9 +412,18 @@ struct pci_host_bridge { void (*release_fn)(struct pci_host_bridge *); void *release_data; unsigned int ignore_reset_delay:1; /* for entire hierarchy */ + /* Resource alignment requirements */ + resource_size_t (*align_resource)(struct pci_dev *dev, + const struct resource *res, + resource_size_t start, + resource_size_t size, + resource_size_t align); }; #define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev) + +struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus); + void pci_set_host_bridge_release(struct pci_host_bridge *bridge, void (*release_fn)(struct pci_host_bridge *), void *release_data); diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h index 80af3cd35ae4..72ce932c69b2 100644 --- a/include/linux/scpi_protocol.h +++ b/include/linux/scpi_protocol.h @@ -71,7 +71,7 @@ struct scpi_ops { int (*sensor_get_value)(u16, u32 *); }; -#if IS_ENABLED(CONFIG_ARM_SCPI_PROTOCOL) +#if IS_REACHABLE(CONFIG_ARM_SCPI_PROTOCOL) struct scpi_ops *get_scpi_ops(void); #else static inline struct scpi_ops *get_scpi_ops(void) { return NULL; } diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index a156b82dd14c..c2b66a277e98 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -524,7 +524,7 @@ asmlinkage long sys_chown(const char __user *filename, asmlinkage long sys_lchown(const char 
__user *filename, uid_t user, gid_t group); asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group); -#ifdef CONFIG_UID16 +#ifdef CONFIG_HAVE_UID16 asmlinkage long sys_chown16(const char __user *filename, old_uid_t user, old_gid_t group); asmlinkage long sys_lchown16(const char __user *filename, diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 4014a59828fc..613c29bd6baf 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -438,7 +438,8 @@ static inline void thermal_zone_device_unregister( static inline int thermal_zone_bind_cooling_device( struct thermal_zone_device *tz, int trip, struct thermal_cooling_device *cdev, - unsigned long upper, unsigned long lower) + unsigned long upper, unsigned long lower, + unsigned int weight) { return -ENODEV; } static inline int thermal_zone_unbind_cooling_device( struct thermal_zone_device *tz, int trip, diff --git a/include/linux/types.h b/include/linux/types.h index 70d8500bddf1..70dd3dfde631 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -35,7 +35,7 @@ typedef __kernel_gid16_t gid16_t; typedef unsigned long uintptr_t; -#ifdef CONFIG_UID16 +#ifdef CONFIG_HAVE_UID16 /* This is defined by include/asm-{arch}/posix_types.h */ typedef __kernel_old_uid_t old_uid_t; typedef __kernel_old_gid_t old_gid_t; diff --git a/include/net/af_unix.h b/include/net/af_unix.h index b36d837c701e..2a91a0561a47 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -62,6 +62,7 @@ struct unix_sock { #define UNIX_GC_CANDIDATE 0 #define UNIX_GC_MAYBE_CYCLE 1 struct socket_wq peer_wq; + wait_queue_t peer_wake; }; static inline struct unix_sock *unix_sk(const struct sock *sk) diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 2bfb2ad2fab1..877f682989b8 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -133,27 +133,18 @@ void rt6_clean_tohost(struct net *net, struct in6_addr *gateway); /* * Store a destination cache entry in a socket */ -static inline void __ip6_dst_store(struct sock *sk, struct dst_entry *dst, - const struct in6_addr *daddr, - const struct in6_addr *saddr) +static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst, + const struct in6_addr *daddr, + const struct in6_addr *saddr) { struct ipv6_pinfo *np = inet6_sk(sk); - struct rt6_info *rt = (struct rt6_info *) dst; + np->dst_cookie = rt6_get_cookie((struct rt6_info *)dst); sk_setup_caps(sk, dst); np->daddr_cache = daddr; #ifdef CONFIG_IPV6_SUBTREES np->saddr_cache = saddr; #endif - np->dst_cookie = rt6_get_cookie(rt); -} - -static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst, - struct in6_addr *daddr, struct in6_addr *saddr) -{ - spin_lock(&sk->sk_dst_lock); - __ip6_dst_store(sk, dst, daddr, saddr); - spin_unlock(&sk->sk_dst_lock); } static inline bool ipv6_unicast_destination(const struct sk_buff *skb) diff --git a/include/net/ipv6.h b/include/net/ipv6.h index e1a10b0ac0b0..9a5c9f013784 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -205,6 +205,7 @@ extern rwlock_t ip6_ra_lock; */ struct ipv6_txoptions { + atomic_t refcnt; /* Length of this structure */ int tot_len; @@ -217,7 +218,7 @@ struct ipv6_txoptions { struct ipv6_opt_hdr *dst0opt; struct ipv6_rt_hdr *srcrt; /* Routing Header */ struct ipv6_opt_hdr *dst1opt; - + struct rcu_head rcu; /* Option buffer, as read by IPV6_PKTOPTIONS, starts here. 
*/ }; @@ -252,6 +253,24 @@ struct ipv6_fl_socklist { struct rcu_head rcu; }; +static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np) +{ + struct ipv6_txoptions *opt; + + rcu_read_lock(); + opt = rcu_dereference(np->opt); + if (opt && !atomic_inc_not_zero(&opt->refcnt)) + opt = NULL; + rcu_read_unlock(); + return opt; +} + +static inline void txopt_put(struct ipv6_txoptions *opt) +{ + if (opt && atomic_dec_and_test(&opt->refcnt)) + kfree_rcu(opt, rcu); +} + struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label); struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space, struct ip6_flowlabel *fl, @@ -490,6 +509,7 @@ struct ip6_create_arg { u32 user; const struct in6_addr *src; const struct in6_addr *dst; + int iif; u8 ecn; }; diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 82045fca388b..760bc4d5a2cf 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -2003,8 +2003,10 @@ enum ieee80211_hw_flags { * it shouldn't be set. * * @max_tx_aggregation_subframes: maximum number of subframes in an - * aggregate an HT driver will transmit, used by the peer as a - * hint to size its reorder buffer. + * aggregate an HT driver will transmit. Though ADDBA will advertise + * a constant value of 64 as some older APs can crash if the window + * size is smaller (an example is LinkSys WRT120N with FW v1.0.07 + * build 002 Jun 18 2012). * * @offchannel_tx_hw_queue: HW queue ID to use for offchannel TX * (if %IEEE80211_HW_QUEUE_CONTROL is set) diff --git a/include/net/ndisc.h b/include/net/ndisc.h index bf3937431030..2d8edaad29cb 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h @@ -181,8 +181,7 @@ void ndisc_cleanup(void); int ndisc_rcv(struct sk_buff *skb); void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit, - const struct in6_addr *daddr, const struct in6_addr *saddr, - struct sk_buff *oskb); + const struct in6_addr *daddr, const struct in6_addr *saddr); void ndisc_send_rs(struct net_device *dev, const struct in6_addr *saddr, const struct in6_addr *daddr); diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 4c79ce8c1f92..b2a8e6338576 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -61,6 +61,9 @@ struct Qdisc { */ #define TCQ_F_WARN_NONWC (1 << 16) #define TCQ_F_CPUSTATS 0x20 /* run using percpu statistics */ +#define TCQ_F_NOPARENT 0x40 /* root of its hierarchy : + * qdisc_tree_decrease_qlen() should stop. + */ u32 limit; const struct Qdisc_ops *ops; struct qdisc_size_table __rcu *stab; diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 495c87e367b3..7bbb71081aeb 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -775,10 +775,10 @@ struct sctp_transport { hb_sent:1, /* Is the Path MTU update pending on this tranport */ - pmtu_pending:1; + pmtu_pending:1, - /* Has this transport moved the ctsn since we last sacked */ - __u32 sack_generation; + /* Has this transport moved the ctsn since we last sacked */ + sack_generation:1; u32 dst_cookie; struct flowi fl; @@ -1482,19 +1482,19 @@ struct sctp_association { prsctp_capable:1, /* Can peer do PR-SCTP? */ auth_capable:1; /* Is peer doing SCTP-AUTH? */ - /* Ack State : This flag indicates if the next received + /* sack_needed : This flag indicates if the next received * : packet is to be responded to with a - * : SACK. This is initializedto 0. When a packet - * : is received it is incremented. If this value + * : SACK. This is initialized to 0. 
When a packet + * : is received sack_cnt is incremented. If this value * : reaches 2 or more, a SACK is sent and the * : value is reset to 0. Note: This is used only * : when no DATA chunks are received out of * : order. When DATA chunks are out of order, * : SACK's are not delayed (see Section 6). */ - __u8 sack_needed; /* Do we need to sack the peer? */ + __u8 sack_needed:1, /* Do we need to sack the peer? */ + sack_generation:1; __u32 sack_cnt; - __u32 sack_generation; __u32 adaptation_ind; /* Adaptation Code point. */ diff --git a/include/net/sock.h b/include/net/sock.h index 7f89e4ba18d1..52d27ee924f4 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -254,7 +254,6 @@ struct cg_proto; * @sk_wq: sock wait queue and async head * @sk_rx_dst: receive input route used by early demux * @sk_dst_cache: destination cache - * @sk_dst_lock: destination cache lock * @sk_policy: flow policy * @sk_receive_queue: incoming packets * @sk_wmem_alloc: transmit queue bytes committed @@ -384,14 +383,16 @@ struct sock { int sk_rcvbuf; struct sk_filter __rcu *sk_filter; - struct socket_wq __rcu *sk_wq; - + union { + struct socket_wq __rcu *sk_wq; + struct socket_wq *sk_wq_raw; + }; #ifdef CONFIG_XFRM struct xfrm_policy *sk_policy[2]; #endif struct dst_entry *sk_rx_dst; struct dst_entry __rcu *sk_dst_cache; - spinlock_t sk_dst_lock; + /* Note: 32bit hole on 64bit arches */ atomic_t sk_wmem_alloc; atomic_t sk_omem_alloc; int sk_sndbuf; @@ -2005,10 +2006,27 @@ static inline unsigned long sock_wspace(struct sock *sk) return amt; } -static inline void sk_wake_async(struct sock *sk, int how, int band) +/* Note: + * We use sk->sk_wq_raw, from contexts knowing this + * pointer is not NULL and cannot disappear/change. + */ +static inline void sk_set_bit(int nr, struct sock *sk) { - if (sock_flag(sk, SOCK_FASYNC)) - sock_wake_async(sk->sk_socket, how, band); + set_bit(nr, &sk->sk_wq_raw->flags); +} + +static inline void sk_clear_bit(int nr, struct sock *sk) +{ + clear_bit(nr, &sk->sk_wq_raw->flags); +} + +static inline void sk_wake_async(const struct sock *sk, int how, int band) +{ + if (sock_flag(sk, SOCK_FASYNC)) { + rcu_read_lock(); + sock_wake_async(rcu_dereference(sk->sk_wq), how, band); + rcu_read_unlock(); + } } /* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index ed527121031d..fcfa3d7f5e7e 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -668,6 +668,9 @@ struct Scsi_Host { unsigned use_blk_mq:1; unsigned use_cmd_list:1; + /* Host responded with short (<36 bytes) INQUIRY result */ + unsigned short_inquiry:1; + /* * Optional work queue to be utilized by the transport */ diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h index 930b41e5acf4..fa341fcb5829 100644 --- a/include/sound/hda_i915.h +++ b/include/sound/hda_i915.h @@ -10,6 +10,9 @@ int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable); int snd_hdac_display_power(struct hdac_bus *bus, bool enable); int snd_hdac_get_display_clk(struct hdac_bus *bus); +int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate); +int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid, + bool *audio_enabled, char *buffer, int max_bytes); int snd_hdac_i915_init(struct hdac_bus *bus); int snd_hdac_i915_exit(struct hdac_bus *bus); int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *); @@ -26,6 +29,17 @@ static inline int snd_hdac_get_display_clk(struct hdac_bus *bus) { return 0; } 
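+/*
+ * The two stubs below mirror the new snd_hdac_sync_audio_rate() and
+ * snd_hdac_acomp_get_eld() entry points for builds without i915
+ * support; the rate stub is a silent no-op while the ELD stub
+ * reports -ENODEV.
+ */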
+static inline int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, + int rate) +{ + return 0; +} +static inline int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid, + bool *audio_enabled, char *buffer, + int max_bytes) +{ + return -ENODEV; +} static inline int snd_hdac_i915_init(struct hdac_bus *bus) { return -ENODEV; diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 0a2c74008e53..aabf0aca0171 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -474,7 +474,7 @@ struct se_cmd { struct completion cmd_wait_comp; const struct target_core_fabric_ops *se_tfo; sense_reason_t (*execute_cmd)(struct se_cmd *); - sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool); + sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *); void *protocol_data; unsigned char *t_task_cdb; diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 484a9fb20479..67ef73a5d6eb 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -1079,6 +1079,12 @@ struct drm_i915_gem_context_destroy { }; struct drm_i915_reg_read { + /* + * Register offset. + * For 64bit wide registers where the upper 32bits don't immediately + * follow the lower 32bits, the offset of the lower 32bits must + * be specified + */ __u64 offset; __u64 val; /* Return value */ }; @@ -1125,8 +1131,9 @@ struct drm_i915_gem_context_param { __u32 ctx_id; __u32 size; __u64 param; -#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1 -#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2 +#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1 +#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2 +#define I915_CONTEXT_PARAM_GTT_SIZE 0x3 __u64 value; }; diff --git a/include/uapi/linux/nfs.h b/include/uapi/linux/nfs.h index 654bae3f1a38..5e6296160361 100644 --- a/include/uapi/linux/nfs.h +++ b/include/uapi/linux/nfs.h @@ -33,17 +33,6 @@ #define NFS_PIPE_DIRNAME "nfs" -/* NFS ioctls */ -/* Let's follow btrfs lead on CLONE to avoid messing userspace */ -#define NFS_IOC_CLONE _IOW(0x94, 9, int) -#define NFS_IOC_CLONE_RANGE _IOW(0x94, 13, int) - -struct nfs_ioctl_clone_range_args { - __s64 src_fd; - __u64 src_off, count; - __u64 dst_off; -}; - /* * NFS stats. The good thing with these values is that NFSv3 errors are * a superset of NFSv2 errors (with the exception of NFSERR_WFLUSH which diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h index 85dedca3dcfb..eeba75395f7d 100644 --- a/include/video/imx-ipu-v3.h +++ b/include/video/imx-ipu-v3.h @@ -343,7 +343,6 @@ struct ipu_client_platformdata { int di; int dc; int dp; - int dmfc; int dma[2]; }; diff --git a/kernel/async.c b/kernel/async.c index 4c3773c0bf63..d2edd6efec56 100644 --- a/kernel/async.c +++ b/kernel/async.c @@ -326,3 +326,4 @@ bool current_is_async(void) return worker && worker->current_func == async_run_entry_fn; } +EXPORT_SYMBOL_GPL(current_is_async); diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 3f4c99e06c6b..b0799bced518 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -28,11 +28,17 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) attr->value_size == 0) return ERR_PTR(-EINVAL); + if (attr->value_size >= 1 << (KMALLOC_SHIFT_MAX - 1)) + /* if value_size is bigger, the user space won't be able to + * access the elements. 
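+ * (1 << (KMALLOC_SHIFT_MAX - 1) is half of the largest allocation
+ * kmalloc() can satisfy, so value_size and its rounded-up element
+ * size stay safely kmalloc-able for the syscall copy paths.)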
+ */ + return ERR_PTR(-E2BIG); + elem_size = round_up(attr->value_size, 8); /* check round_up into zero and u32 overflow */ if (elem_size == 0 || - attr->max_entries > (U32_MAX - sizeof(*array)) / elem_size) + attr->max_entries > (U32_MAX - PAGE_SIZE - sizeof(*array)) / elem_size) return ERR_PTR(-ENOMEM); array_size = sizeof(*array) + attr->max_entries * elem_size; @@ -105,7 +111,7 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value, /* all elements already exist */ return -EEXIST; - memcpy(array->value + array->elem_size * index, value, array->elem_size); + memcpy(array->value + array->elem_size * index, value, map->value_size); return 0; } diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 19909b22b4f8..34777b3746fa 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -64,12 +64,35 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) */ goto free_htab; - err = -ENOMEM; + if (htab->map.value_size >= (1 << (KMALLOC_SHIFT_MAX - 1)) - + MAX_BPF_STACK - sizeof(struct htab_elem)) + /* if value_size is bigger, the user space won't be able to + * access the elements via bpf syscall. This check also makes + * sure that the elem_size doesn't overflow and it's + * kmalloc-able later in htab_map_update_elem() + */ + goto free_htab; + + htab->elem_size = sizeof(struct htab_elem) + + round_up(htab->map.key_size, 8) + + htab->map.value_size; + /* prevent zero size kmalloc and check for u32 overflow */ if (htab->n_buckets == 0 || htab->n_buckets > U32_MAX / sizeof(struct hlist_head)) goto free_htab; + if ((u64) htab->n_buckets * sizeof(struct hlist_head) + + (u64) htab->elem_size * htab->map.max_entries >= + U32_MAX - PAGE_SIZE) + /* make sure page count doesn't overflow */ + goto free_htab; + + htab->map.pages = round_up(htab->n_buckets * sizeof(struct hlist_head) + + htab->elem_size * htab->map.max_entries, + PAGE_SIZE) >> PAGE_SHIFT; + + err = -ENOMEM; htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct hlist_head), GFP_USER | __GFP_NOWARN); @@ -85,13 +108,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) raw_spin_lock_init(&htab->lock); htab->count = 0; - htab->elem_size = sizeof(struct htab_elem) + - round_up(htab->map.key_size, 8) + - htab->map.value_size; - - htab->map.pages = round_up(htab->n_buckets * sizeof(struct hlist_head) + - htab->elem_size * htab->map.max_entries, - PAGE_SIZE) >> PAGE_SHIFT; return &htab->map; free_htab: @@ -222,7 +238,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value, WARN_ON_ONCE(!rcu_read_lock_held()); /* allocate new element outside of lock */ - l_new = kmalloc(htab->elem_size, GFP_ATOMIC); + l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN); if (!l_new) return -ENOMEM; diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index be6d726e31c9..5a8a797d50b7 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c @@ -34,7 +34,7 @@ static void *bpf_any_get(void *raw, enum bpf_type type) atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt); break; case BPF_TYPE_MAP: - atomic_inc(&((struct bpf_map *)raw)->refcnt); + bpf_map_inc(raw, true); break; default: WARN_ON_ONCE(1); @@ -51,7 +51,7 @@ static void bpf_any_put(void *raw, enum bpf_type type) bpf_prog_put(raw); break; case BPF_TYPE_MAP: - bpf_map_put(raw); + bpf_map_put_with_uref(raw); break; default: WARN_ON_ONCE(1); @@ -64,7 +64,7 @@ static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type) void *raw; *type = BPF_TYPE_MAP; - raw = bpf_map_get(ufd); + raw = bpf_map_get_with_uref(ufd); if 
(IS_ERR(raw)) { *type = BPF_TYPE_PROG; raw = bpf_prog_get(ufd); diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 0d3313d02a7e..3b39550d8485 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -82,6 +82,14 @@ static void bpf_map_free_deferred(struct work_struct *work) map->ops->map_free(map); } +static void bpf_map_put_uref(struct bpf_map *map) +{ + if (atomic_dec_and_test(&map->usercnt)) { + if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) + bpf_fd_array_map_clear(map); + } +} + /* decrement map refcnt and schedule it for freeing via workqueue * (underlying map implementation ops->map_free() might sleep) */ @@ -93,17 +101,15 @@ void bpf_map_put(struct bpf_map *map) { } } -static int bpf_map_release(struct inode *inode, struct file *filp) +void bpf_map_put_with_uref(struct bpf_map *map) { - struct bpf_map *map = filp->private_data; - - if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) - /* prog_array stores refcnt-ed bpf_prog pointers - * release them all when user space closes prog_array_fd - */ - bpf_fd_array_map_clear(map); - + bpf_map_put_uref(map); bpf_map_put(map); +} + +static int bpf_map_release(struct inode *inode, struct file *filp) +{ + bpf_map_put_with_uref(filp->private_data); return 0; } @@ -142,6 +148,7 @@ static int map_create(union bpf_attr *attr) return PTR_ERR(map); atomic_set(&map->refcnt, 1); + atomic_set(&map->usercnt, 1); err = bpf_map_charge_memlock(map); if (err) @@ -174,7 +181,14 @@ struct bpf_map *__bpf_map_get(struct fd f) return f.file->private_data; } -struct bpf_map *bpf_map_get(u32 ufd) +void bpf_map_inc(struct bpf_map *map, bool uref) +{ + atomic_inc(&map->refcnt); + if (uref) + atomic_inc(&map->usercnt); +} + +struct bpf_map *bpf_map_get_with_uref(u32 ufd) { struct fd f = fdget(ufd); struct bpf_map *map; @@ -183,7 +197,7 @@ struct bpf_map *bpf_map_get(u32 ufd) if (IS_ERR(map)) return map; - atomic_inc(&map->refcnt); + bpf_map_inc(map, true); fdput(f); return map; @@ -226,7 +240,7 @@ static int map_lookup_elem(union bpf_attr *attr) goto free_key; err = -ENOMEM; - value = kmalloc(map->value_size, GFP_USER); + value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN); if (!value) goto free_key; @@ -285,7 +299,7 @@ static int map_update_elem(union bpf_attr *attr) goto free_key; err = -ENOMEM; - value = kmalloc(map->value_size, GFP_USER); + value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN); if (!value) goto free_key; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index c6073056badf..a7945d10b378 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2021,8 +2021,7 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env) * will be used by the valid program until it's unloaded * and all maps are released in free_bpf_prog_info() */ - atomic_inc(&map->refcnt); - + bpf_map_inc(map, false); fdput(f); next_insn: insn++; diff --git a/kernel/pid.c b/kernel/pid.c index ca368793808e..78b3d9f80d44 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -467,7 +467,7 @@ struct pid *get_task_pid(struct task_struct *task, enum pid_type type) rcu_read_lock(); if (type != PIDTYPE_PID) task = task->group_leader; - pid = get_pid(task->pids[type].pid); + pid = get_pid(rcu_dereference(task->pids[type].pid)); rcu_read_unlock(); return pid; } @@ -528,7 +528,7 @@ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, if (likely(pid_alive(task))) { if (type != PIDTYPE_PID) task = task->group_leader; - nr = pid_nr_ns(task->pids[type].pid, ns); + nr = pid_nr_ns(rcu_dereference(task->pids[type].pid), ns); } rcu_read_unlock();
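The kernel/pid.c hunks above illustrate the RCU rule that several other hunks in this merge also apply: task->pids[type].pid can be published and retired concurrently, so a lockless reader must fetch it with rcu_dereference() inside an rcu_read_lock() section rather than through a plain load. A minimal sketch of the resulting pattern (illustrative only; sketch_task_pid is a hypothetical name, the real callers are get_task_pid() and __task_pid_nr_ns()):

#include <linux/pid.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

/* Hypothetical helper mirroring the access pattern the patch enforces. */
static struct pid *sketch_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;

	rcu_read_lock();
	/* rcu_dereference() pairs with the writer's rcu_assign_pointer();
	 * get_pid() takes a reference while still inside the read section,
	 * so the returned pid stays valid after rcu_read_unlock(). */
	pid = get_pid(rcu_dereference(task->pids[type].pid));
	rcu_read_unlock();

	return pid;
}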
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 4d568ac9319e..7063c6a07440 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1947,13 +1947,38 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) #ifdef CONFIG_SMP /* + * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be + * possible to, falsely, observe p->on_cpu == 0. + * + * One must be running (->on_cpu == 1) in order to remove oneself + * from the runqueue. + * + * [S] ->on_cpu = 1; [L] ->on_rq + * UNLOCK rq->lock + * RMB + * LOCK rq->lock + * [S] ->on_rq = 0; [L] ->on_cpu + * + * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock + * from the consecutive calls to schedule(); the first switching to our + * task, the second putting it to sleep. + */ + smp_rmb(); + + /* * If the owning (remote) cpu is still in the middle of schedule() with * this task as prev, wait until it's done referencing the task. */ while (p->on_cpu) cpu_relax(); /* - * Pairs with the smp_wmb() in finish_lock_switch(). + * Combined with the control dependency above, we have an effective + * smp_load_acquire() without the need for full barriers. + * + * Pairs with the smp_store_release() in finish_lock_switch(). + * + * This ensures that tasks getting woken will be fully ordered against + * their previous state and preserve Program Order. */ smp_rmb(); @@ -2039,7 +2064,6 @@ out: */ int wake_up_process(struct task_struct *p) { - WARN_ON(task_is_stopped_or_traced(p)); return try_to_wake_up(p, TASK_NORMAL, 0); } EXPORT_SYMBOL(wake_up_process); @@ -5847,13 +5871,13 @@ static int init_rootdomain(struct root_domain *rd) { memset(rd, 0, sizeof(*rd)); - if (!alloc_cpumask_var(&rd->span, GFP_KERNEL)) + if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL)) goto out; - if (!alloc_cpumask_var(&rd->online, GFP_KERNEL)) + if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL)) goto free_span; - if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) + if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) goto free_online; - if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) + if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) goto free_dlo_mask; init_dl_bw(&rd->dl_bw); diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 26a54461bf59..05de80b48586 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -788,6 +788,9 @@ cputime_t task_gtime(struct task_struct *t) unsigned int seq; cputime_t gtime; + if (!context_tracking_is_enabled()) + return t->gtime; + do { seq = read_seqbegin(&t->vtime_seqlock); diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index e3cc16312046..8ec86abe0ea1 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -64,7 +64,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b) raw_spin_unlock(&rt_b->rt_runtime_lock); } -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI) static void push_irq_work_func(struct irq_work *work); #endif diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index efd3bfc7e347..b242775bf670 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1073,6 +1073,9 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) * We must ensure this doesn't happen until the switch is completely * finished. * + * In particular, the load of prev->state in finish_task_switch() must + * happen before this. + * + * Pairs with the control dependency and rmb in try_to_wake_up().
*/ smp_store_release(&prev->on_cpu, 0); diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c index 052e02672d12..f10bd873e684 100644 --- a/kernel/sched/wait.c +++ b/kernel/sched/wait.c @@ -583,18 +583,18 @@ EXPORT_SYMBOL(wake_up_atomic_t); __sched int bit_wait(struct wait_bit_key *word) { - if (signal_pending_state(current->state, current)) - return 1; schedule(); + if (signal_pending(current)) + return -EINTR; return 0; } EXPORT_SYMBOL(bit_wait); __sched int bit_wait_io(struct wait_bit_key *word) { - if (signal_pending_state(current->state, current)) - return 1; io_schedule(); + if (signal_pending(current)) + return -EINTR; return 0; } EXPORT_SYMBOL(bit_wait_io); @@ -602,11 +602,11 @@ EXPORT_SYMBOL(bit_wait_io); __sched int bit_wait_timeout(struct wait_bit_key *word) { unsigned long now = READ_ONCE(jiffies); - if (signal_pending_state(current->state, current)) - return 1; if (time_after_eq(now, word->timeout)) return -EAGAIN; schedule_timeout(word->timeout - now); + if (signal_pending(current)) + return -EINTR; return 0; } EXPORT_SYMBOL_GPL(bit_wait_timeout); @@ -614,11 +614,11 @@ EXPORT_SYMBOL_GPL(bit_wait_timeout); __sched int bit_wait_io_timeout(struct wait_bit_key *word) { unsigned long now = READ_ONCE(jiffies); - if (signal_pending_state(current->state, current)) - return 1; if (time_after_eq(now, word->timeout)) return -EAGAIN; io_schedule_timeout(word->timeout - now); + if (signal_pending(current)) + return -EINTR; return 0; } EXPORT_SYMBOL_GPL(bit_wait_io_timeout); diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 75f1d05ea82d..9c6045a27ba3 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -1887,12 +1887,6 @@ rb_event_index(struct ring_buffer_event *event) return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE; } -static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) -{ - cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp; - cpu_buffer->reader_page->read = 0; -} - static void rb_inc_iter(struct ring_buffer_iter *iter) { struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; @@ -2803,8 +2797,11 @@ rb_reserve_next_event(struct ring_buffer *buffer, event = __rb_reserve_next(cpu_buffer, &info); - if (unlikely(PTR_ERR(event) == -EAGAIN)) + if (unlikely(PTR_ERR(event) == -EAGAIN)) { + if (info.add_timestamp) + info.length -= RB_LEN_TIME_EXTEND; goto again; + } if (!event) goto out_fail; @@ -3626,7 +3623,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) /* Finally update the reader page to the new head */ cpu_buffer->reader_page = reader; - rb_reset_reader_page(cpu_buffer); + cpu_buffer->reader_page->read = 0; if (overwrite != cpu_buffer->last_overrun) { cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; @@ -3636,6 +3633,10 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) goto again; out: + /* Update the read_stamp on the first event */ + if (reader && reader->read == 0) + cpu_buffer->read_stamp = reader->page->time_stamp; + arch_spin_unlock(&cpu_buffer->lock); local_irq_restore(flags); diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 6bbc5f652355..4f6ef6912e00 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -582,6 +582,12 @@ static void __ftrace_clear_event_pids(struct trace_array *tr) unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr); unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr); + 
unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr); + unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr); + + unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr); + unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr); + list_for_each_entry(file, &tr->events, list) { clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags); } @@ -1729,6 +1735,16 @@ ftrace_event_pid_write(struct file *filp, const char __user *ubuf, tr, INT_MAX); register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr, 0); + + register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, + tr, INT_MAX); + register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, + tr, 0); + + register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre, + tr, INT_MAX); + register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post, + tr, 0); } /* diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index a3bffd1ec2b4..70306cc9d814 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -271,11 +271,11 @@ static long bt_sock_data_wait(struct sock *sk, long timeo) if (signal_pending(current) || !timeo) break; - set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); + sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); - clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); + sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); } __set_current_state(TASK_RUNNING); @@ -441,7 +441,7 @@ unsigned int bt_sock_poll(struct file *file, struct socket *sock, if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk)) mask |= POLLOUT | POLLWRNORM | POLLWRBAND; else - set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); return mask; } diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index c91353841e40..ffed8a1d4f27 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -3027,8 +3027,13 @@ static void smp_ready_cb(struct l2cap_chan *chan) BT_DBG("chan %p", chan); + /* No need to call l2cap_chan_hold() here since we already own + * the reference taken in smp_new_conn_cb(). This is just the + * first time that we tie it to a specific pointer. The code in + * l2cap_core.c ensures that there's no risk this function won't + * get called if smp_new_conn_cb was previously called.
+ */ conn->smp = chan; - l2cap_chan_hold(chan); if (hcon->type == ACL_LINK && test_bit(HCI_CONN_ENCRYPT, &hcon->flags)) bredr_pairing(chan); diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index cc858919108e..aa209b1066c9 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -323,7 +323,7 @@ static long caif_stream_data_wait(struct sock *sk, long timeo) !timeo) break; - set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); + sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); @@ -331,7 +331,7 @@ static long caif_stream_data_wait(struct sock *sk, long timeo) if (sock_flag(sk, SOCK_DEAD)) break; - clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); + sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); } finish_wait(sk_sleep(sk), &wait); diff --git a/net/core/datagram.c b/net/core/datagram.c index 617088aee21d..d62af69ad844 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -785,7 +785,7 @@ unsigned int datagram_poll(struct file *file, struct socket *sock, if (sock_writeable(sk)) mask |= POLLOUT | POLLWRNORM | POLLWRBAND; else - set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); return mask; } diff --git a/net/core/neighbour.c b/net/core/neighbour.c index e6af42da28d9..f18ae91b652e 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -2215,7 +2215,7 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn, ndm->ndm_pad2 = 0; ndm->ndm_flags = pn->flags | NTF_PROXY; ndm->ndm_type = RTN_UNICAST; - ndm->ndm_ifindex = pn->dev->ifindex; + ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0; ndm->ndm_state = NUD_NONE; if (nla_put(skb, NDA_DST, tbl->key_len, pn->key)) @@ -2333,7 +2333,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, if (h > s_h) s_idx = 0; for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) { - if (dev_net(n->dev) != net) + if (pneigh_net(n) != net) continue; if (idx < s_idx) goto next; diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c index 6441f47b1a8f..2e4df84c34a1 100644 --- a/net/core/netclassid_cgroup.c +++ b/net/core/netclassid_cgroup.c @@ -56,7 +56,7 @@ static void cgrp_css_free(struct cgroup_subsys_state *css) kfree(css_cls_state(css)); } -static int update_classid(const void *v, struct file *file, unsigned n) +static int update_classid_sock(const void *v, struct file *file, unsigned n) { int err; struct socket *sock = sock_from_file(file, &err); @@ -67,18 +67,25 @@ static int update_classid(const void *v, struct file *file, unsigned n) return 0; } -static void cgrp_attach(struct cgroup_subsys_state *css, - struct cgroup_taskset *tset) +static void update_classid(struct cgroup_subsys_state *css, void *v) { - struct cgroup_cls_state *cs = css_cls_state(css); - void *v = (void *)(unsigned long)cs->classid; + struct css_task_iter it; struct task_struct *p; - cgroup_taskset_for_each(p, tset) { + css_task_iter_start(css, &it); + while ((p = css_task_iter_next(&it))) { task_lock(p); - iterate_fd(p->files, 0, update_classid, v); + iterate_fd(p->files, 0, update_classid_sock, v); task_unlock(p); } + css_task_iter_end(&it); +} + +static void cgrp_attach(struct cgroup_subsys_state *css, + struct cgroup_taskset *tset) +{ + update_classid(css, + (void *)(unsigned long)css_cls_state(css)->classid); } static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft) @@ -89,8 +96,11 @@ static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft) static int 
write_classid(struct cgroup_subsys_state *css, struct cftype *cft, u64 value) { - css_cls_state(css)->classid = (u32) value; + struct cgroup_cls_state *cs = css_cls_state(css); + + cs->classid = (u32)value; + update_classid(css, (void *)(unsigned long)cs->classid); return 0; } diff --git a/net/core/scm.c b/net/core/scm.c index 3b6899b7d810..8a1741b14302 100644 --- a/net/core/scm.c +++ b/net/core/scm.c @@ -305,6 +305,8 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm) err = put_user(cmlen, &cm->cmsg_len); if (!err) { cmlen = CMSG_SPACE(i*sizeof(int)); + if (msg->msg_controllen < cmlen) + cmlen = msg->msg_controllen; msg->msg_control += cmlen; msg->msg_controllen -= cmlen; } diff --git a/net/core/sock.c b/net/core/sock.c index 1e4dd54bfb5a..e31dfcee1729 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1530,7 +1530,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) skb_queue_head_init(&newsk->sk_receive_queue); skb_queue_head_init(&newsk->sk_write_queue); - spin_lock_init(&newsk->sk_dst_lock); rwlock_init(&newsk->sk_callback_lock); lockdep_set_class_and_name(&newsk->sk_callback_lock, af_callback_keys + newsk->sk_family, @@ -1607,7 +1606,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst) { u32 max_segs = 1; - __sk_dst_set(sk, dst); + sk_dst_set(sk, dst); sk->sk_route_caps = dst->dev->features; if (sk->sk_route_caps & NETIF_F_GSO) sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; @@ -1815,7 +1814,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo) { DEFINE_WAIT(wait); - clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); + sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); for (;;) { if (!timeo) break; @@ -1861,7 +1860,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf) break; - set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); err = -EAGAIN; if (!timeo) @@ -2048,9 +2047,9 @@ int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb) DEFINE_WAIT(wait); prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); - set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); + sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb); - clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); + sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); finish_wait(sk_sleep(sk), &wait); return rc; } @@ -2388,7 +2387,6 @@ void sock_init_data(struct socket *sock, struct sock *sk) } else sk->sk_wq = NULL; - spin_lock_init(&sk->sk_dst_lock); rwlock_init(&sk->sk_callback_lock); lockdep_set_class_and_name(&sk->sk_callback_lock, af_callback_keys + sk->sk_family, diff --git a/net/core/stream.c b/net/core/stream.c index d70f77a0c889..b96f7a79e544 100644 --- a/net/core/stream.c +++ b/net/core/stream.c @@ -39,7 +39,7 @@ void sk_stream_write_space(struct sock *sk) wake_up_interruptible_poll(&wq->wait, POLLOUT | POLLWRNORM | POLLWRBAND); if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) - sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT); + sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT); rcu_read_unlock(); } } @@ -126,7 +126,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p) current_timeo = vm_wait = (prandom_u32() % (HZ / 5)) + 2; while (1) { - set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); @@ -139,7 +139,7 @@ int 
sk_stream_wait_memory(struct sock *sk, long *timeo_p) } if (signal_pending(current)) goto do_interrupted; - clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); + sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); if (sk_stream_memory_free(sk) && !vm_wait) break; diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index db5fc2440a23..9c6d0508e63a 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -202,7 +202,9 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req security_req_classify_flow(req, flowi6_to_flowi(&fl6)); - final_p = fl6_update_dst(&fl6, np->opt, &final); + rcu_read_lock(); + final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final); + rcu_read_unlock(); dst = ip6_dst_lookup_flow(sk, &fl6, final_p); if (IS_ERR(dst)) { @@ -219,7 +221,10 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req &ireq->ir_v6_loc_addr, &ireq->ir_v6_rmt_addr); fl6.daddr = ireq->ir_v6_rmt_addr; - err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass); + rcu_read_lock(); + err = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt), + np->tclass); + rcu_read_unlock(); err = net_xmit_eval(err); } @@ -387,6 +392,7 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk, struct inet_request_sock *ireq = inet_rsk(req); struct ipv6_pinfo *newnp; const struct ipv6_pinfo *np = inet6_sk(sk); + struct ipv6_txoptions *opt; struct inet_sock *newinet; struct dccp6_sock *newdp6; struct sock *newsk; @@ -453,7 +459,7 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk, * comment in that function for the gory details. -acme */ - __ip6_dst_store(newsk, dst, NULL, NULL); + ip6_dst_store(newsk, dst, NULL, NULL); newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM | NETIF_F_TSO); newdp6 = (struct dccp6_sock *)newsk; @@ -488,13 +494,15 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk, * Yes, keeping reference count would be much more clever, but we do * one more thing there: reattach optmem to newsk.
*/ - if (np->opt != NULL) - newnp->opt = ipv6_dup_options(newsk, np->opt); - + opt = rcu_dereference(np->opt); + if (opt) { + opt = ipv6_dup_options(newsk, opt); + RCU_INIT_POINTER(newnp->opt, opt); + } inet_csk(newsk)->icsk_ext_hdr_len = 0; - if (newnp->opt != NULL) - inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen + - newnp->opt->opt_flen); + if (opt) + inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen + + opt->opt_flen; dccp_sync_mss(newsk, dst_mtu(dst)); @@ -757,6 +765,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, struct ipv6_pinfo *np = inet6_sk(sk); struct dccp_sock *dp = dccp_sk(sk); struct in6_addr *saddr = NULL, *final_p, final; + struct ipv6_txoptions *opt; struct flowi6 fl6; struct dst_entry *dst; int addr_type; @@ -856,7 +865,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, fl6.fl6_sport = inet->inet_sport; security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); - final_p = fl6_update_dst(&fl6, np->opt, &final); + opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk)); + final_p = fl6_update_dst(&fl6, opt, &final); dst = ip6_dst_lookup_flow(sk, &fl6, final_p); if (IS_ERR(dst)) { @@ -873,12 +883,11 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, np->saddr = *saddr; inet->inet_rcv_saddr = LOOPBACK4_IPV6; - __ip6_dst_store(sk, dst, NULL, NULL); + ip6_dst_store(sk, dst, NULL, NULL); icsk->icsk_ext_hdr_len = 0; - if (np->opt != NULL) - icsk->icsk_ext_hdr_len = (np->opt->opt_flen + - np->opt->opt_nflen); + if (opt) + icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen; inet->inet_dport = usin->sin6_port; diff --git a/net/dccp/proto.c b/net/dccp/proto.c index b5cf13a28009..41e65804ddf5 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -339,8 +339,7 @@ unsigned int dccp_poll(struct file *file, struct socket *sock, if (sk_stream_is_writeable(sk)) { mask |= POLLOUT | POLLWRNORM; } else { /* send SIGIO later */ - set_bit(SOCK_ASYNC_NOSPACE, - &sk->sk_socket->flags); + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); /* Race breaker. If space is freed after diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 675cf94e04f8..eebf5ac8ce18 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -1747,9 +1747,9 @@ static int dn_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, } prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); - set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); + sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target)); - clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); + sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); finish_wait(sk_sleep(sk), &wait); } @@ -2004,10 +2004,10 @@ static int dn_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) } prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); - set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); + sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); sk_wait_event(sk, &timeo, !dn_queue_too_long(scp, queue, flags)); - clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); + sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); finish_wait(sk_sleep(sk), &wait); continue; } diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c index 4677b6fa6dda..ecc28cff08ab 100644 --- a/net/dns_resolver/dns_query.c +++ b/net/dns_resolver/dns_query.c @@ -67,7 +67,7 @@ * Returns the size of the result on success, -ve error code otherwise. 
*/ int dns_query(const char *type, const char *name, size_t namelen, - const char *options, char **_result, time_t *_expiry) + const char *options, char **_result, time64_t *_expiry) { struct key *rkey; const struct user_key_payload *upayload; diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index 35a9788bb3ae..c7d1adca30d8 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -312,7 +312,7 @@ static void send_hsr_supervision_frame(struct hsr_port *master, u8 type) return; out: - WARN_ON_ONCE("HSR: Could not send supervision frame\n"); + WARN_ONCE(1, "HSR: Could not send supervision frame\n"); kfree_skb(skb); } diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 6baf36e11808..05e4cba14162 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -2126,7 +2126,7 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) ASSERT_RTNL(); in_dev = ip_mc_find_dev(net, imr); - if (!in_dev) { + if (!imr->imr_ifindex && !imr->imr_address.s_addr && !in_dev) { ret = -ENODEV; goto out; } @@ -2147,7 +2147,8 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) *imlp = iml->next_rcu; - ip_mc_dec_group(in_dev, group); + if (in_dev) + ip_mc_dec_group(in_dev, group); /* decrease mem now to avoid the memleak warning */ atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 92dd4b74d513..c3a38353f5dc 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -134,7 +134,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm); static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc, int cmd); -static void mroute_clean_tables(struct mr_table *mrt); +static void mroute_clean_tables(struct mr_table *mrt, bool all); static void ipmr_expire_process(unsigned long arg); #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES @@ -350,7 +350,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id) static void ipmr_free_table(struct mr_table *mrt) { del_timer_sync(&mrt->ipmr_expire_timer); - mroute_clean_tables(mrt); + mroute_clean_tables(mrt, true); kfree(mrt); } @@ -441,10 +441,6 @@ struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v) return dev; failure: - /* allow the register to be completed before unregistering. */ - rtnl_unlock(); - rtnl_lock(); - unregister_netdevice(dev); return NULL; } @@ -540,10 +536,6 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt) return dev; failure: - /* allow the register to be completed before unregistering. 
*/ - rtnl_unlock(); - rtnl_lock(); - unregister_netdevice(dev); return NULL; } @@ -1208,7 +1200,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt, * Close the multicast socket, and clear the vif tables etc */ -static void mroute_clean_tables(struct mr_table *mrt) +static void mroute_clean_tables(struct mr_table *mrt, bool all) { int i; LIST_HEAD(list); @@ -1217,8 +1209,9 @@ static void mroute_clean_tables(struct mr_table *mrt) /* Shut down all active vif entries */ for (i = 0; i < mrt->maxvif; i++) { - if (!(mrt->vif_table[i].flags & VIFF_STATIC)) - vif_delete(mrt, i, 0, &list); + if (!all && (mrt->vif_table[i].flags & VIFF_STATIC)) + continue; + vif_delete(mrt, i, 0, &list); } unregister_netdevice_many(&list); @@ -1226,7 +1219,7 @@ static void mroute_clean_tables(struct mr_table *mrt) for (i = 0; i < MFC_LINES; i++) { list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) { - if (c->mfc_flags & MFC_STATIC) + if (!all && (c->mfc_flags & MFC_STATIC)) continue; list_del_rcu(&c->list); mroute_netlink_event(mrt, c, RTM_DELROUTE); @@ -1261,7 +1254,7 @@ static void mrtsock_destruct(struct sock *sk) NETCONFA_IFINDEX_ALL, net->ipv4.devconf_all); RCU_INIT_POINTER(mrt->mroute_sk, NULL); - mroute_clean_tables(mrt); + mroute_clean_tables(mrt, false); } } rtnl_unlock(); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index c1728771cf89..c82cca18c90f 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -517,8 +517,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) if (sk_stream_is_writeable(sk)) { mask |= POLLOUT | POLLWRNORM; } else { /* send SIGIO later */ - set_bit(SOCK_ASYNC_NOSPACE, - &sk->sk_socket->flags); + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); /* Race breaker. 
If space is freed after @@ -906,7 +905,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, goto out_err; } - clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); + sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); mss_now = tcp_send_mss(sk, &size_goal, flags); copied = 0; @@ -1134,7 +1133,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) } /* This should be in poll */ - clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); + sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); mss_now = tcp_send_mss(sk, &size_goal, flags); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index fdd88c3803a6..2d656eef7f8e 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4481,19 +4481,34 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) { struct sk_buff *skb; + int err = -ENOMEM; + int data_len = 0; bool fragstolen; if (size == 0) return 0; - skb = alloc_skb(size, sk->sk_allocation); + if (size > PAGE_SIZE) { + int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS); + + data_len = npages << PAGE_SHIFT; + size = data_len + (size & ~PAGE_MASK); + } + skb = alloc_skb_with_frags(size - data_len, data_len, + PAGE_ALLOC_COSTLY_ORDER, + &err, sk->sk_allocation); if (!skb) goto err; + skb_put(skb, size - data_len); + skb->data_len = data_len; + skb->len = size; + if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) goto err_free; - if (memcpy_from_msg(skb_put(skb, size), msg, size)) + err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size); + if (err) goto err_free; TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; @@ -4509,7 +4524,8 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) err_free: kfree_skb(skb); err: - return -ENOMEM; + return err; + } static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) @@ -5667,6 +5683,7 @@ discard: } tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; + tp->copied_seq = tp->rcv_nxt; tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; /* RFC1323: The window in SYN & SYN/ACK segments is diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index ba09016d1bfd..db003438aaf5 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -921,7 +921,8 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, } md5sig = rcu_dereference_protected(tp->md5sig_info, - sock_owned_by_user(sk)); + sock_owned_by_user(sk) || + lockdep_is_held(&sk->sk_lock.slock)); if (!md5sig) { md5sig = kmalloc(sizeof(*md5sig), gfp); if (!md5sig) diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index c9c716a483e4..193ba1fa8a9a 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -168,7 +168,7 @@ static int tcp_write_timeout(struct sock *sk) dst_negative_advice(sk); if (tp->syn_fastopen || tp->syn_data) tcp_fastopen_cache_set(sk, 0, NULL, true, 0); - if (tp->syn_data) + if (tp->syn_data && icsk->icsk_retransmits == 1) NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL); } @@ -176,6 +176,18 @@ static int tcp_write_timeout(struct sock *sk) syn_set = true; } else { if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) { + /* Some middle-boxes may black-hole Fast Open _after_ + * the handshake. Therefore we conservatively disable + * Fast Open on this path on recurring timeouts with + * few or zero bytes acked after Fast Open. 
+ */ + if (tp->syn_data_acked && + tp->bytes_acked <= tp->rx_opt.mss_clamp) { + tcp_fastopen_cache_set(sk, 0, NULL, true, 0); + if (icsk->icsk_retransmits == sysctl_tcp_retries1) + NET_INC_STATS_BH(sock_net(sk), + LINUX_MIB_TCPFASTOPENACTIVEFAIL); + } /* Black hole detection */ tcp_mtu_probing(icsk, sk); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 24ec14f9825c..0c7b0e61b917 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -100,7 +100,6 @@ #include <linux/slab.h> #include <net/tcp_states.h> #include <linux/skbuff.h> -#include <linux/netdevice.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <net/net_namespace.h> diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index d84742f003a9..61f26851655c 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3642,7 +3642,7 @@ static void addrconf_dad_work(struct work_struct *w) /* send a neighbour solicitation for our addr */ addrconf_addr_solict_mult(&ifp->addr, &mcaddr); - ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any, NULL); + ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any); out: in6_ifa_put(ifp); rtnl_unlock(); diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 44bb66bde0e2..8ec0df75f1c4 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -428,9 +428,11 @@ void inet6_destroy_sock(struct sock *sk) /* Free tx options */ - opt = xchg(&np->opt, NULL); - if (opt) - sock_kfree_s(sk, opt, opt->tot_len); + opt = xchg((__force struct ipv6_txoptions **)&np->opt, NULL); + if (opt) { + atomic_sub(opt->tot_len, &sk->sk_omem_alloc); + txopt_put(opt); + } } EXPORT_SYMBOL_GPL(inet6_destroy_sock); @@ -659,7 +661,10 @@ int inet6_sk_rebuild_header(struct sock *sk) fl6.fl6_sport = inet->inet_sport; security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); - final_p = fl6_update_dst(&fl6, np->opt, &final); + rcu_read_lock(); + final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), + &final); + rcu_read_unlock(); dst = ip6_dst_lookup_flow(sk, &fl6, final_p); if (IS_ERR(dst)) { @@ -668,7 +673,7 @@ int inet6_sk_rebuild_header(struct sock *sk) return PTR_ERR(dst); } - __ip6_dst_store(sk, dst, NULL, NULL); + ip6_dst_store(sk, dst, NULL, NULL); } return 0; diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index d70b0238f468..517c55b01ba8 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -167,8 +167,10 @@ ipv4_connected: security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); - opt = flowlabel ? flowlabel->opt : np->opt; + rcu_read_lock(); + opt = flowlabel ? 
flowlabel->opt : rcu_dereference(np->opt); final_p = fl6_update_dst(&fl6, opt, &final); + rcu_read_unlock(); dst = ip6_dst_lookup_flow(sk, &fl6, final_p); err = 0; diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index ce203b0402be..ea7c4d64a00a 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -727,6 +727,7 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt) *((char **)&opt2->dst1opt) += dif; if (opt2->srcrt) *((char **)&opt2->srcrt) += dif; + atomic_set(&opt2->refcnt, 1); } return opt2; } @@ -790,7 +791,7 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, return ERR_PTR(-ENOBUFS); memset(opt2, 0, tot_len); - + atomic_set(&opt2->refcnt, 1); opt2->tot_len = tot_len; p = (char *)(opt2 + 1); diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 36c5a98b0472..0a37ddc7af51 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -834,11 +834,6 @@ void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6, security_sk_classify_flow(sk, flowi6_to_flowi(fl6)); } -/* - * Special lock-class for __icmpv6_sk: - */ -static struct lock_class_key icmpv6_socket_sk_dst_lock_key; - static int __net_init icmpv6_sk_init(struct net *net) { struct sock *sk; @@ -860,15 +855,6 @@ static int __net_init icmpv6_sk_init(struct net *net) net->ipv6.icmp_sk[i] = sk; - /* - * Split off their lock-class, because sk->sk_dst_lock - * gets used from softirqs, which is safe for - * __icmpv6_sk (because those never get directly used - * via userspace syscalls), but unsafe for normal sockets. - */ - lockdep_set_class(&sk->sk_dst_lock, - &icmpv6_socket_sk_dst_lock_key); - /* Enough space for 2 64K ICMP packets, including * sk_buff struct overhead. */ diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index 5d1c7cee2cb2..a7ca2cde2ecb 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c @@ -78,7 +78,9 @@ struct dst_entry *inet6_csk_route_req(const struct sock *sk, memset(fl6, 0, sizeof(*fl6)); fl6->flowi6_proto = proto; fl6->daddr = ireq->ir_v6_rmt_addr; - final_p = fl6_update_dst(fl6, np->opt, &final); + rcu_read_lock(); + final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); + rcu_read_unlock(); fl6->saddr = ireq->ir_v6_loc_addr; fl6->flowi6_oif = ireq->ir_iif; fl6->flowi6_mark = ireq->ir_mark; @@ -109,14 +111,6 @@ void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr) EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr); static inline -void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst, - const struct in6_addr *daddr, - const struct in6_addr *saddr) -{ - __ip6_dst_store(sk, dst, daddr, saddr); -} - -static inline struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie) { return __sk_dst_check(sk, cookie); @@ -142,14 +136,16 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk, fl6->fl6_dport = inet->inet_dport; security_sk_classify_flow(sk, flowi6_to_flowi(fl6)); - final_p = fl6_update_dst(fl6, np->opt, &final); + rcu_read_lock(); + final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); + rcu_read_unlock(); dst = __inet6_csk_dst_check(sk, np->dst_cookie); if (!dst) { dst = ip6_dst_lookup_flow(sk, fl6, final_p); if (!IS_ERR(dst)) - __inet6_csk_dst_store(sk, dst, NULL, NULL); + ip6_dst_store(sk, dst, NULL, NULL); } return dst; } @@ -175,7 +171,8 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused /* Restore final destination back after routing done */ fl6.daddr = sk->sk_v6_daddr; - res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass); 
+ res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt), + np->tclass); rcu_read_unlock(); return res; } diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index eabffbb89795..137fca42aaa6 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -177,7 +177,7 @@ void ip6_tnl_dst_reset(struct ip6_tnl *t) int i; for_each_possible_cpu(i) - ip6_tnl_per_cpu_dst_set(raw_cpu_ptr(t->dst_cache), NULL); + ip6_tnl_per_cpu_dst_set(per_cpu_ptr(t->dst_cache, i), NULL); } EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset); diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index ad19136086dd..a10e77103c88 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -118,7 +118,7 @@ static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc, int cmd); static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb); -static void mroute_clean_tables(struct mr6_table *mrt); +static void mroute_clean_tables(struct mr6_table *mrt, bool all); static void ipmr_expire_process(unsigned long arg); #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES @@ -334,7 +334,7 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id) static void ip6mr_free_table(struct mr6_table *mrt) { del_timer_sync(&mrt->ipmr_expire_timer); - mroute_clean_tables(mrt); + mroute_clean_tables(mrt, true); kfree(mrt); } @@ -765,10 +765,6 @@ static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt) return dev; failure: - /* allow the register to be completed before unregistering. */ - rtnl_unlock(); - rtnl_lock(); - unregister_netdevice(dev); return NULL; } @@ -1542,7 +1538,7 @@ static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt, * Close the multicast socket, and clear the vif tables etc */ -static void mroute_clean_tables(struct mr6_table *mrt) +static void mroute_clean_tables(struct mr6_table *mrt, bool all) { int i; LIST_HEAD(list); @@ -1552,8 +1548,9 @@ static void mroute_clean_tables(struct mr6_table *mrt) * Shut down all active vif entries */ for (i = 0; i < mrt->maxvif; i++) { - if (!(mrt->vif6_table[i].flags & VIFF_STATIC)) - mif6_delete(mrt, i, &list); + if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC)) + continue; + mif6_delete(mrt, i, &list); } unregister_netdevice_many(&list); @@ -1562,7 +1559,7 @@ static void mroute_clean_tables(struct mr6_table *mrt) */ for (i = 0; i < MFC6_LINES; i++) { list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) { - if (c->mfc_flags & MFC_STATIC) + if (!all && (c->mfc_flags & MFC_STATIC)) continue; write_lock_bh(&mrt_lock); list_del(&c->list); @@ -1625,7 +1622,7 @@ int ip6mr_sk_done(struct sock *sk) net->ipv6.devconf_all); write_unlock_bh(&mrt_lock); - mroute_clean_tables(mrt); + mroute_clean_tables(mrt, false); err = 0; break; } diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 63e6956917c9..4449ad1f8114 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -111,7 +111,8 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk, icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie); } } - opt = xchg(&inet6_sk(sk)->opt, opt); + opt = xchg((__force struct ipv6_txoptions **)&inet6_sk(sk)->opt, + opt); sk_dst_reset(sk); return opt; @@ -231,9 +232,12 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, sk->sk_socket->ops = &inet_dgram_ops; sk->sk_family = PF_INET; } - opt = xchg(&np->opt, NULL); - if (opt) - sock_kfree_s(sk, opt, opt->tot_len); + opt = xchg((__force struct ipv6_txoptions **)&np->opt, + NULL); + if (opt) { + atomic_sub(opt->tot_len, &sk->sk_omem_alloc); 
+ txopt_put(opt); + } pktopt = xchg(&np->pktoptions, NULL); kfree_skb(pktopt); @@ -403,7 +407,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW)) break; - opt = ipv6_renew_options(sk, np->opt, optname, + opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk)); + opt = ipv6_renew_options(sk, opt, optname, (struct ipv6_opt_hdr __user *)optval, optlen); if (IS_ERR(opt)) { @@ -432,8 +437,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, retv = 0; opt = ipv6_update_options(sk, opt); sticky_done: - if (opt) - sock_kfree_s(sk, opt, opt->tot_len); + if (opt) { + atomic_sub(opt->tot_len, &sk->sk_omem_alloc); + txopt_put(opt); + } break; } @@ -486,6 +493,7 @@ sticky_done: break; memset(opt, 0, sizeof(*opt)); + atomic_set(&opt->refcnt, 1); opt->tot_len = sizeof(*opt) + optlen; retv = -EFAULT; if (copy_from_user(opt+1, optval, optlen)) @@ -502,8 +510,10 @@ update: retv = 0; opt = ipv6_update_options(sk, opt); done: - if (opt) - sock_kfree_s(sk, opt, opt->tot_len); + if (opt) { + atomic_sub(opt->tot_len, &sk->sk_omem_alloc); + txopt_put(opt); + } break; } case IPV6_UNICAST_HOPS: @@ -1110,10 +1120,11 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, case IPV6_RTHDR: case IPV6_DSTOPTS: { + struct ipv6_txoptions *opt; lock_sock(sk); - len = ipv6_getsockopt_sticky(sk, np->opt, - optname, optval, len); + opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk)); + len = ipv6_getsockopt_sticky(sk, opt, optname, optval, len); release_sock(sk); /* check if ipv6_getsockopt_sticky() returns err code */ if (len < 0) diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 3e0f855e1bea..d6161e1c48c8 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -556,8 +556,7 @@ static void ndisc_send_unsol_na(struct net_device *dev) } void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit, - const struct in6_addr *daddr, const struct in6_addr *saddr, - struct sk_buff *oskb) + const struct in6_addr *daddr, const struct in6_addr *saddr) { struct sk_buff *skb; struct in6_addr addr_buf; @@ -593,9 +592,6 @@ void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit, ndisc_fill_addr_option(skb, ND_OPT_SOURCE_LL_ADDR, dev->dev_addr); - if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE) && oskb) - skb_dst_copy(skb, oskb); - ndisc_send_skb(skb, daddr, saddr); } @@ -682,12 +678,12 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb) "%s: trying to ucast probe in NUD_INVALID: %pI6\n", __func__, target); } - ndisc_send_ns(dev, target, target, saddr, skb); + ndisc_send_ns(dev, target, target, saddr); } else if ((probes -= NEIGH_VAR(neigh->parms, APP_PROBES)) < 0) { neigh_app_ns(neigh); } else { addrconf_addr_solict_mult(target, &mcaddr); - ndisc_send_ns(dev, target, &mcaddr, saddr, skb); + ndisc_send_ns(dev, target, &mcaddr, saddr); } } diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index d5efeb87350e..bab4441ed4e4 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -190,7 +190,7 @@ static void nf_ct_frag6_expire(unsigned long data) /* Creation primitives. 
*/ static inline struct frag_queue *fq_find(struct net *net, __be32 id, u32 user, struct in6_addr *src, - struct in6_addr *dst, u8 ecn) + struct in6_addr *dst, int iif, u8 ecn) { struct inet_frag_queue *q; struct ip6_create_arg arg; @@ -200,6 +200,7 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id, arg.user = user; arg.src = src; arg.dst = dst; + arg.iif = iif; arg.ecn = ecn; local_bh_disable(); @@ -601,7 +602,7 @@ struct sk_buff *nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 use fhdr = (struct frag_hdr *)skb_transport_header(clone); fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr, - ip6_frag_ecn(hdr)); + skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr)); if (fq == NULL) { pr_debug("Can't find and can't create new queue\n"); goto ret_orig; diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index dc65ec198f7c..99140986e887 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -733,6 +733,7 @@ static int raw6_getfrag(void *from, char *to, int offset, int len, int odd, static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) { + struct ipv6_txoptions *opt_to_free = NULL; struct ipv6_txoptions opt_space; DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); struct in6_addr *daddr, *final_p, final; @@ -839,8 +840,10 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) if (!(opt->opt_nflen|opt->opt_flen)) opt = NULL; } - if (!opt) - opt = np->opt; + if (!opt) { + opt = txopt_get(np); + opt_to_free = opt; + } if (flowlabel) opt = fl6_merge_options(&opt_space, flowlabel, opt); opt = ipv6_fixup_options(&opt_space, opt); @@ -906,6 +909,7 @@ done: dst_release(dst); out: fl6_sock_release(flowlabel); + txopt_put(opt_to_free); return err < 0 ? err : len; do_confirm: dst_confirm(dst); diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 44e21a03cfc3..45f5ae51de65 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -108,7 +108,10 @@ bool ip6_frag_match(const struct inet_frag_queue *q, const void *a) return fq->id == arg->id && fq->user == arg->user && ipv6_addr_equal(&fq->saddr, arg->src) && - ipv6_addr_equal(&fq->daddr, arg->dst); + ipv6_addr_equal(&fq->daddr, arg->dst) && + (arg->iif == fq->iif || + !(ipv6_addr_type(arg->dst) & (IPV6_ADDR_MULTICAST | + IPV6_ADDR_LINKLOCAL))); } EXPORT_SYMBOL(ip6_frag_match); @@ -180,7 +183,7 @@ static void ip6_frag_expire(unsigned long data) static struct frag_queue * fq_find(struct net *net, __be32 id, const struct in6_addr *src, - const struct in6_addr *dst, u8 ecn) + const struct in6_addr *dst, int iif, u8 ecn) { struct inet_frag_queue *q; struct ip6_create_arg arg; @@ -190,6 +193,7 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src, arg.user = IP6_DEFRAG_LOCAL_DELIVER; arg.src = src; arg.dst = dst; + arg.iif = iif; arg.ecn = ecn; hash = inet6_hash_frag(id, src, dst); @@ -551,7 +555,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb) } fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr, - ip6_frag_ecn(hdr)); + skb->dev ? 
skb->dev->ifindex : 0, ip6_frag_ecn(hdr)); if (fq) { int ret; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 6f01fe122abd..826e6aa44f8d 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -523,7 +523,7 @@ static void rt6_probe_deferred(struct work_struct *w) container_of(w, struct __rt6_probe_work, work); addrconf_addr_solict_mult(&work->target, &mcaddr); - ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, NULL); + ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL); dev_put(work->dev); kfree(work); } diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index bb8f2fa1c7fb..eaf7ac496d50 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -222,7 +222,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_TCP; fl6.daddr = ireq->ir_v6_rmt_addr; - final_p = fl6_update_dst(&fl6, np->opt, &final); + final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final); fl6.saddr = ireq->ir_v6_loc_addr; fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.flowi6_mark = ireq->ir_mark; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index c5429a636f1a..e7aab561b7b4 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -120,6 +120,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, struct ipv6_pinfo *np = inet6_sk(sk); struct tcp_sock *tp = tcp_sk(sk); struct in6_addr *saddr = NULL, *final_p, final; + struct ipv6_txoptions *opt; struct flowi6 fl6; struct dst_entry *dst; int addr_type; @@ -235,7 +236,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, fl6.fl6_dport = usin->sin6_port; fl6.fl6_sport = inet->inet_sport; - final_p = fl6_update_dst(&fl6, np->opt, &final); + opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk)); + final_p = fl6_update_dst(&fl6, opt, &final); security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); @@ -255,7 +257,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, inet->inet_rcv_saddr = LOOPBACK4_IPV6; sk->sk_gso_type = SKB_GSO_TCPV6; - __ip6_dst_store(sk, dst, NULL, NULL); + ip6_dst_store(sk, dst, NULL, NULL); if (tcp_death_row.sysctl_tw_recycle && !tp->rx_opt.ts_recent_stamp && @@ -263,9 +265,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, tcp_fetch_timewait_stamp(sk, dst); icsk->icsk_ext_hdr_len = 0; - if (np->opt) - icsk->icsk_ext_hdr_len = (np->opt->opt_flen + - np->opt->opt_nflen); + if (opt) + icsk->icsk_ext_hdr_len = opt->opt_flen + + opt->opt_nflen; tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); @@ -461,7 +463,8 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst, if (np->repflow && ireq->pktopts) fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts)); - err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass); + err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), + np->tclass); err = net_xmit_eval(err); } @@ -972,6 +975,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * struct inet_request_sock *ireq; struct ipv6_pinfo *newnp; const struct ipv6_pinfo *np = inet6_sk(sk); + struct ipv6_txoptions *opt; struct tcp6_sock *newtcp6sk; struct inet_sock *newinet; struct tcp_sock *newtp; @@ -1056,7 +1060,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * */ newsk->sk_gso_type = SKB_GSO_TCPV6; - __ip6_dst_store(newsk, dst, NULL, NULL); + ip6_dst_store(newsk, dst, NULL, NULL); inet6_sk_rx_dst_set(newsk, skb); newtcp6sk = (struct tcp6_sock 
*)newsk; @@ -1098,13 +1102,15 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * but we do one more thing there: reattach optmem to newsk. */ - if (np->opt) - newnp->opt = ipv6_dup_options(newsk, np->opt); - + opt = rcu_dereference(np->opt); + if (opt) { + opt = ipv6_dup_options(newsk, opt); + RCU_INIT_POINTER(newnp->opt, opt); + } inet_csk(newsk)->icsk_ext_hdr_len = 0; - if (newnp->opt) - inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen + - newnp->opt->opt_flen); + if (opt) + inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen + + opt->opt_flen; tcp_ca_openreq_child(newsk, dst); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 01bcb49619ee..9da3287a3923 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1110,6 +1110,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); struct in6_addr *daddr, *final_p, final; struct ipv6_txoptions *opt = NULL; + struct ipv6_txoptions *opt_to_free = NULL; struct ip6_flowlabel *flowlabel = NULL; struct flowi6 fl6; struct dst_entry *dst; @@ -1263,8 +1264,10 @@ do_udp_sendmsg: opt = NULL; connected = 0; } - if (!opt) - opt = np->opt; + if (!opt) { + opt = txopt_get(np); + opt_to_free = opt; + } if (flowlabel) opt = fl6_merge_options(&opt_space, flowlabel, opt); opt = ipv6_fixup_options(&opt_space, opt); @@ -1373,6 +1376,7 @@ release_dst: out: dst_release(dst); fl6_sock_release(flowlabel); + txopt_put(opt_to_free); if (!err) return len; /* diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index fcb2752419c6..435608c4306d 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -1483,7 +1483,7 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock, if (sock_writeable(sk) && iucv_below_msglim(sk)) mask |= POLLOUT | POLLWRNORM | POLLWRBAND; else - set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); return mask; } diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index aca38d8aed8e..a2c8747d2936 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -486,6 +486,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name); struct in6_addr *daddr, *final_p, final; struct ipv6_pinfo *np = inet6_sk(sk); + struct ipv6_txoptions *opt_to_free = NULL; struct ipv6_txoptions *opt = NULL; struct ip6_flowlabel *flowlabel = NULL; struct dst_entry *dst = NULL; @@ -575,8 +576,10 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) opt = NULL; } - if (opt == NULL) - opt = np->opt; + if (!opt) { + opt = txopt_get(np); + opt_to_free = opt; + } if (flowlabel) opt = fl6_merge_options(&opt_space, flowlabel, opt); opt = ipv6_fixup_options(&opt_space, opt); @@ -631,6 +634,7 @@ done: dst_release(dst); out: fl6_sock_release(flowlabel); + txopt_put(opt_to_free); return err < 0 ?
err : len; diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index a758eb84e8f0..ff757181b0a8 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -500,7 +500,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) /* send AddBA request */ ieee80211_send_addba_request(sdata, sta->sta.addr, tid, tid_tx->dialog_token, start_seq_num, - local->hw.max_tx_aggregation_subframes, + IEEE80211_MAX_AMPDU_BUF, tid_tx->timeout); } @@ -926,6 +926,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK; tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6; + buf_size = min(buf_size, local->hw.max_tx_aggregation_subframes); mutex_lock(&sta->ampdu_mlme.mtx); diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index c2bd1b6a6922..da471eef07bb 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -3454,8 +3454,12 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, goto out_unlock; } } else { - /* for cookie below */ - ack_skb = skb; + /* Assign a dummy non-zero cookie, it's not sent to + * userspace in this case but we rely on its value + * internally in the need_offchan case to distinguish + * mgmt-tx from remain-on-channel. + */ + *cookie = 0xffffffff; } if (!need_offchan) { diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index d0dc1bfaeec2..c9e325d2e120 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -76,7 +76,8 @@ bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata) void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata, bool update_bss) { - if (__ieee80211_recalc_txpower(sdata) || update_bss) + if (__ieee80211_recalc_txpower(sdata) || + (update_bss && ieee80211_sdata_running(sdata))) ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER); } @@ -1861,6 +1862,7 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata) unregister_netdevice(sdata->dev); } else { cfg80211_unregister_wdev(&sdata->wdev); + ieee80211_teardown_sdata(sdata); kfree(sdata); } } @@ -1870,7 +1872,6 @@ void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata) if (WARN_ON_ONCE(!test_bit(SDATA_STATE_RUNNING, &sdata->state))) return; ieee80211_do_stop(sdata, true); - ieee80211_teardown_sdata(sdata); } void ieee80211_remove_interfaces(struct ieee80211_local *local) diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 858f6b1cb149..175ffcf7fb06 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -541,8 +541,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, NL80211_FEATURE_HT_IBSS | NL80211_FEATURE_VIF_TXPOWER | NL80211_FEATURE_MAC_ON_CREATE | - NL80211_FEATURE_USERSPACE_MPM | - NL80211_FEATURE_FULL_AP_CLIENT_STATE; + NL80211_FEATURE_USERSPACE_MPM; if (!ops->hw_scan) wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN | diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index b890e225a8f1..b3b44a5dd375 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -779,10 +779,8 @@ void mesh_plink_broken(struct sta_info *sta) static void mesh_path_node_reclaim(struct rcu_head *rp) { struct mpath_node *node = container_of(rp, struct mpath_node, rcu); - struct ieee80211_sub_if_data *sdata = node->mpath->sdata; del_timer_sync(&node->mpath->timer); - atomic_dec(&sdata->u.mesh.mpaths); kfree(node->mpath); kfree(node); } @@ -790,8 +788,9 @@ static void mesh_path_node_reclaim(struct rcu_head *rp) /* needs 
to be called with the corresponding hashwlock taken */ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node) { - struct mesh_path *mpath; - mpath = node->mpath; + struct mesh_path *mpath = node->mpath; + struct ieee80211_sub_if_data *sdata = node->mpath->sdata; + spin_lock(&mpath->state_lock); mpath->flags |= MESH_PATH_RESOLVING; if (mpath->is_gate) @@ -799,6 +798,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node) hlist_del_rcu(&node->list); call_rcu(&node->rcu, mesh_path_node_reclaim); spin_unlock(&mpath->state_lock); + atomic_dec(&sdata->u.mesh.mpaths); atomic_dec(&tbl->entries); } diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 4aeca4b0c3cb..a413e52f7691 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -597,8 +597,8 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, /* We need to ensure power level is at max for scanning. */ ieee80211_hw_config(local, 0); - if ((req->channels[0]->flags & - IEEE80211_CHAN_NO_IR) || + if ((req->channels[0]->flags & (IEEE80211_CHAN_NO_IR | + IEEE80211_CHAN_RADAR)) || !req->n_ssids) { next_delay = IEEE80211_PASSIVE_CHANNEL_TIME; } else { @@ -645,7 +645,7 @@ ieee80211_scan_get_channel_time(struct ieee80211_channel *chan) * TODO: channel switching also consumes quite some time, * add that delay as well to get a better estimation */ - if (chan->flags & IEEE80211_CHAN_NO_IR) + if (chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR)) return IEEE80211_PASSIVE_CHANNEL_TIME; return IEEE80211_PROBE_DELAY + IEEE80211_CHANNEL_TIME; } @@ -777,7 +777,8 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local, * * In any case, it is not necessary for a passive scan. */ - if (chan->flags & IEEE80211_CHAN_NO_IR || !scan_req->n_ssids) { + if ((chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR)) || + !scan_req->n_ssids) { *next_delay = IEEE80211_PASSIVE_CHANNEL_TIME; local->next_scan_state = SCAN_DECISION; return; diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index b7de0da46acd..ecf0a0196f18 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c @@ -572,7 +572,7 @@ static unsigned int llcp_sock_poll(struct file *file, struct socket *sock, if (sock_writeable(sk) && sk->sk_state == LLCP_CONNECTED) mask |= POLLOUT | POLLWRNORM | POLLWRBAND; else - set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); pr_debug("mask 0x%x\n", mask); diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c index a7a80a6b77b0..653d073bae45 100644 --- a/net/openvswitch/dp_notify.c +++ b/net/openvswitch/dp_notify.c @@ -58,7 +58,7 @@ void ovs_dp_notify_wq(struct work_struct *work) struct hlist_node *n; hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) { - if (vport->ops->type != OVS_VPORT_TYPE_NETDEV) + if (vport->ops->type == OVS_VPORT_TYPE_INTERNAL) continue; if (!(vport->dev->priv_flags & IFF_OVS_DATAPATH)) diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c index efb736bb6855..e41cd12d9b2d 100644 --- a/net/openvswitch/vport-geneve.c +++ b/net/openvswitch/vport-geneve.c @@ -117,7 +117,6 @@ static struct vport_ops ovs_geneve_vport_ops = { .destroy = ovs_netdev_tunnel_destroy, .get_options = geneve_get_options, .send = dev_queue_xmit, - .owner = THIS_MODULE, }; static int __init ovs_geneve_tnl_init(void) diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c index c3257d78d3d2..7f8897f33a67 100644 --- a/net/openvswitch/vport-gre.c +++ 
b/net/openvswitch/vport-gre.c @@ -89,7 +89,6 @@ static struct vport_ops ovs_gre_vport_ops = { .create = gre_create, .send = dev_queue_xmit, .destroy = ovs_netdev_tunnel_destroy, - .owner = THIS_MODULE, }; static int __init ovs_gre_tnl_init(void) diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index b327368a3848..6b0190b987ec 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c @@ -180,9 +180,13 @@ void ovs_netdev_tunnel_destroy(struct vport *vport) if (vport->dev->priv_flags & IFF_OVS_DATAPATH) ovs_netdev_detach_dev(vport); - /* Early release so we can unregister the device */ + /* We can be invoked by both explicit vport deletion and + * underlying netdev deregistration; delete the link only + * if it's not already shutting down. + */ + if (vport->dev->reg_state == NETREG_REGISTERED) + rtnl_delete_link(vport->dev); dev_put(vport->dev); - rtnl_delete_link(vport->dev); vport->dev = NULL; rtnl_unlock(); diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index 0ac0fd004d7e..31cbc8c5c7db 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c @@ -71,7 +71,7 @@ static struct hlist_head *hash_bucket(const struct net *net, const char *name) return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)]; } -int ovs_vport_ops_register(struct vport_ops *ops) +int __ovs_vport_ops_register(struct vport_ops *ops) { int err = -EEXIST; struct vport_ops *o; @@ -87,7 +87,7 @@ errout: ovs_unlock(); return err; } -EXPORT_SYMBOL_GPL(ovs_vport_ops_register); +EXPORT_SYMBOL_GPL(__ovs_vport_ops_register); void ovs_vport_ops_unregister(struct vport_ops *ops) { @@ -256,8 +256,8 @@ int ovs_vport_set_options(struct vport *vport, struct nlattr *options) * * @vport: vport to delete. * - * Detaches @vport from its datapath and destroys it. It is possible to fail - * for reasons such as lack of memory. ovs_mutex must be held. + * Detaches @vport from its datapath and destroys it. ovs_mutex must + * be held. 
*/ void ovs_vport_del(struct vport *vport) { diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h index bdfd82a7c064..8ea3a96980ac 100644 --- a/net/openvswitch/vport.h +++ b/net/openvswitch/vport.h @@ -196,7 +196,13 @@ static inline const char *ovs_vport_name(struct vport *vport) return vport->dev->name; } -int ovs_vport_ops_register(struct vport_ops *ops); +int __ovs_vport_ops_register(struct vport_ops *ops); +#define ovs_vport_ops_register(ops) \ + ({ \ + (ops)->owner = THIS_MODULE; \ + __ovs_vport_ops_register(ops); \ + }) + void ovs_vport_ops_unregister(struct vport_ops *ops); static inline struct rtable *ovs_tunnel_route_lookup(struct net *net, diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 1cf928fb573e..992396aa635c 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2329,8 +2329,8 @@ static void tpacket_destruct_skb(struct sk_buff *skb) static bool ll_header_truncated(const struct net_device *dev, int len) { /* net device doesn't like empty head */ - if (unlikely(len <= dev->hard_header_len)) { - net_warn_ratelimited("%s: packet size is too short (%d <= %d)\n", + if (unlikely(len < dev->hard_header_len)) { + net_warn_ratelimited("%s: packet size is too short (%d < %d)\n", current->comm, len, dev->hard_header_len); return true; } diff --git a/net/rds/connection.c b/net/rds/connection.c index d4564036a339..e3b118cae81d 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c @@ -186,12 +186,6 @@ static struct rds_connection *__rds_conn_create(struct net *net, } } - if (trans == NULL) { - kmem_cache_free(rds_conn_slab, conn); - conn = ERR_PTR(-ENODEV); - goto out; - } - conn->c_trans = trans; ret = trans->conn_alloc(conn, gfp); diff --git a/net/rds/send.c b/net/rds/send.c index 827155c2ead1..c9cdb358ea88 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -1013,11 +1013,13 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) release_sock(sk); } - /* racing with another thread binding seems ok here */ + lock_sock(sk); if (daddr == 0 || rs->rs_bound_addr == 0) { + release_sock(sk); ret = -ENOTCONN; /* XXX not a great errno */ goto out; } + release_sock(sk); if (payload_len > rds_sk_sndbuf(rs)) { ret = -EMSGSIZE; diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c index e0547f521f20..adc555e0323d 100644 --- a/net/rxrpc/ar-ack.c +++ b/net/rxrpc/ar-ack.c @@ -723,8 +723,10 @@ process_further: if ((call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY || call->state == RXRPC_CALL_SERVER_AWAIT_ACK) && - hard > tx) + hard > tx) { + call->acks_hard = tx; goto all_acked; + } smp_rmb(); rxrpc_rotate_tx_window(call, hard - 1); diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c index a40d3afe93b7..14c4e12c47b0 100644 --- a/net/rxrpc/ar-output.c +++ b/net/rxrpc/ar-output.c @@ -531,7 +531,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); /* this should be in poll */ - clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); + sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) return -EPIPE; diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index f43c8f33f09e..7ec667dd4ce1 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -253,7 +253,8 @@ int qdisc_set_default(const char *name) } /* We know handle. Find qdisc among all qdisc's attached to device - (root qdisc, all its children, children of children etc.) + * (root qdisc, all its children, children of children etc.) 
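
The vport.h hunk turns ovs_vport_ops_register() into a macro precisely so that THIS_MODULE expands in the registering module rather than inside openvswitch itself, which is what lets the earlier hunks drop the hard-coded .owner fields from the geneve and gre ops. A minimal userspace model of that trick, relying only on a GNU statement expression (which the kernel macro also uses); __FILE__ stands in for THIS_MODULE and every *_demo name is invented:

#include <stdio.h>

struct vport_ops_demo {
	const char *name;
	const char *owner;	/* stand-in for struct module *owner */
};

static int __register_demo(struct vport_ops_demo *ops)
{
	printf("registered %s (owner %s)\n", ops->name, ops->owner);
	return 0;
}

/* The macro body runs in the caller's context, so it can stamp the
 * caller's identity before delegating to the real function. */
#define register_demo(ops)		\
	({				\
		(ops)->owner = __FILE__;	\
		__register_demo(ops);	\
	})

int main(void)
{
	struct vport_ops_demo ops = { .name = "geneve", .owner = NULL };

	return register_demo(&ops);
}
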
+ * Note: caller either uses rtnl or rcu_read_lock() */ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle) @@ -264,7 +265,7 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle) root->handle == handle) return root; - list_for_each_entry(q, &root->list, list) { + list_for_each_entry_rcu(q, &root->list, list) { if (q->handle == handle) return q; } @@ -277,15 +278,18 @@ void qdisc_list_add(struct Qdisc *q) struct Qdisc *root = qdisc_dev(q)->qdisc; WARN_ON_ONCE(root == &noop_qdisc); - list_add_tail(&q->list, &root->list); + ASSERT_RTNL(); + list_add_tail_rcu(&q->list, &root->list); } } EXPORT_SYMBOL(qdisc_list_add); void qdisc_list_del(struct Qdisc *q) { - if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) - list_del(&q->list); + if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) { + ASSERT_RTNL(); + list_del_rcu(&q->list); + } } EXPORT_SYMBOL(qdisc_list_del); @@ -750,14 +754,18 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) if (n == 0) return; drops = max_t(int, n, 0); + rcu_read_lock(); while ((parentid = sch->parent)) { if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS)) - return; + break; + if (sch->flags & TCQ_F_NOPARENT) + break; + /* TODO: perform the search on a per txq basis */ sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid)); if (sch == NULL) { - WARN_ON(parentid != TC_H_ROOT); - return; + WARN_ON_ONCE(parentid != TC_H_ROOT); + break; } cops = sch->ops->cl_ops; if (cops->qlen_notify) { @@ -768,6 +776,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) sch->q.qlen -= n; __qdisc_qstats_drop(sch, drops); } + rcu_read_unlock(); } EXPORT_SYMBOL(qdisc_tree_decrease_qlen); @@ -941,7 +950,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, } lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock); if (!netif_is_multiqueue(dev)) - sch->flags |= TCQ_F_ONETXQUEUE; + sch->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; } sch->handle = handle; diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index cb5d4ad32946..e82a1ad80aa5 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -737,7 +737,7 @@ static void attach_one_default_qdisc(struct net_device *dev, return; } if (!netif_is_multiqueue(dev)) - qdisc->flags |= TCQ_F_ONETXQUEUE; + qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; dev_queue->qdisc_sleeping = qdisc; } diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c index f3cbaecd283a..3e82f047caaf 100644 --- a/net/sched/sch_mq.c +++ b/net/sched/sch_mq.c @@ -63,7 +63,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt) if (qdisc == NULL) goto err; priv->qdiscs[ntx] = qdisc; - qdisc->flags |= TCQ_F_ONETXQUEUE; + qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; } sch->flags |= TCQ_F_MQROOT; @@ -156,7 +156,7 @@ static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new, *old = dev_graft_qdisc(dev_queue, new); if (new) - new->flags |= TCQ_F_ONETXQUEUE; + new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; if (dev->flags & IFF_UP) dev_activate(dev); return 0; diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c index 3811a745452c..ad70ecf57ce7 100644 --- a/net/sched/sch_mqprio.c +++ b/net/sched/sch_mqprio.c @@ -132,7 +132,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) goto err; } priv->qdiscs[i] = qdisc; - qdisc->flags |= TCQ_F_ONETXQUEUE; + qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; } /* If the mqprio options indicate that hardware should own @@ -209,7 +209,7 @@ static int 
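
The sch_api.c changes make the parent walk in qdisc_tree_decrease_qlen() safe for mq/mqprio children: such qdiscs are attached directly to a TX queue, so their parent handle names no real qdisc, and the new TCQ_F_NOPARENT flag tells the walk to stop instead of warning. A simplified sketch of just that traversal rule, with demo structs and direct parent pointers in place of the kernel's handle lookup under rcu_read_lock():

#include <stdio.h>

#define TCQ_F_NOPARENT_DEMO 0x1	/* standalone root: parent handle is not a real qdisc */

struct qdisc_demo {
	const char *name;
	unsigned int flags;
	unsigned int qlen;
	struct qdisc_demo *parent;	/* NULL plays the role of TC_H_ROOT */
};

/* After a child drops n packets, every ancestor's queue length must
 * shrink too -- but the walk has to stop at qdiscs flagged NOPARENT. */
static void tree_decrease_qlen(struct qdisc_demo *sch, unsigned int n)
{
	while (sch->parent) {
		if (sch->flags & TCQ_F_NOPARENT_DEMO)
			break;
		sch = sch->parent;
		sch->qlen -= n;
		printf("%s: qlen now %u\n", sch->name, sch->qlen);
	}
}

int main(void)
{
	struct qdisc_demo root = { "htb root", 0, 20, NULL };
	struct qdisc_demo leaf = { "pfifo", 0, 10, &root };
	struct qdisc_demo mq_leaf = { "mq child", TCQ_F_NOPARENT_DEMO, 10, &root };

	tree_decrease_qlen(&leaf, 3);		/* propagates to the root */
	tree_decrease_qlen(&mq_leaf, 3);	/* stops: no real parent to notify */
	return 0;
}
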
mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new, *old = dev_graft_qdisc(dev_queue, new); if (new) - new->flags |= TCQ_F_ONETXQUEUE; + new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; if (dev->flags & IFF_UP) dev_activate(dev); diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index e917d27328ea..acb45b8c2a9d 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -209,6 +209,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport) struct sock *sk = skb->sk; struct ipv6_pinfo *np = inet6_sk(sk); struct flowi6 *fl6 = &transport->fl.u.ip6; + int res; pr_debug("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb, skb->len, &fl6->saddr, &fl6->daddr); @@ -220,7 +221,10 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport) SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS); - return ip6_xmit(sk, skb, fl6, np->opt, np->tclass); + rcu_read_lock(); + res = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), np->tclass); + rcu_read_unlock(); + return res; } /* Returns the dst cache entry for the given source and destination ip @@ -262,7 +266,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, pr_debug("src=%pI6 - ", &fl6->saddr); } - final_p = fl6_update_dst(fl6, np->opt, &final); + rcu_read_lock(); + final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); + rcu_read_unlock(); + dst = ip6_dst_lookup_flow(sk, fl6, final_p); if (!asoc || saddr) goto out; @@ -321,7 +328,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, if (baddr) { fl6->saddr = baddr->v6.sin6_addr; fl6->fl6_sport = baddr->v6.sin6_port; - final_p = fl6_update_dst(fl6, np->opt, &final); + final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); dst = ip6_dst_lookup_flow(sk, fl6, final_p); } diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 897c01c029ca..03c8256063ec 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -972,7 +972,7 @@ static int sctp_setsockopt_bindx(struct sock *sk, return -EFAULT; /* Alloc space for the address array in kernel memory. 
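
The sctp/ipv6.c hunks convert bare np->opt reads into rcu_dereference() under rcu_read_lock(), matching IPv6 code that now publishes the options struct via RCU. The publish/snapshot shape of that pattern can be modelled in userspace with C11 acquire/release atomics; real RCU additionally defers freeing the old struct until all readers have left their critical sections. All names below are invented for the demo:

#include <stdio.h>
#include <stdatomic.h>

struct ipv6_txoptions_demo { int tclass_override; };

/* Writers publish a new options struct with a release store; readers
 * snapshot it with an acquire load inside their read-side section --
 * the userspace shape of rcu_assign_pointer()/rcu_dereference(). */
static _Atomic(struct ipv6_txoptions_demo *) np_opt;

static void reader(void)
{
	struct ipv6_txoptions_demo *opt =
		atomic_load_explicit(&np_opt, memory_order_acquire);

	printf("xmit sees opts=%p\n", (void *)opt);
}

int main(void)
{
	static struct ipv6_txoptions_demo opts = { 1 };

	reader();	/* before publication: NULL */
	atomic_store_explicit(&np_opt, &opts, memory_order_release);
	reader();	/* after: sees the new struct */
	return 0;
}
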
*/ - kaddrs = kmalloc(addrs_size, GFP_KERNEL); + kaddrs = kmalloc(addrs_size, GFP_USER | __GFP_NOWARN); if (unlikely(!kaddrs)) return -ENOMEM; @@ -4928,7 +4928,7 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len, to = optval + offsetof(struct sctp_getaddrs, addrs); space_left = len - offsetof(struct sctp_getaddrs, addrs); - addrs = kmalloc(space_left, GFP_KERNEL); + addrs = kmalloc(space_left, GFP_USER | __GFP_NOWARN); if (!addrs) return -ENOMEM; @@ -6458,7 +6458,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait) if (sctp_writeable(sk)) { mask |= POLLOUT | POLLWRNORM; } else { - set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); /* * Since the socket is not locked, the buffer * might be made available after the writeable check and @@ -6801,26 +6801,30 @@ no_packet: static void __sctp_write_space(struct sctp_association *asoc) { struct sock *sk = asoc->base.sk; - struct socket *sock = sk->sk_socket; - if ((sctp_wspace(asoc) > 0) && sock) { - if (waitqueue_active(&asoc->wait)) - wake_up_interruptible(&asoc->wait); + if (sctp_wspace(asoc) <= 0) + return; + + if (waitqueue_active(&asoc->wait)) + wake_up_interruptible(&asoc->wait); - if (sctp_writeable(sk)) { - wait_queue_head_t *wq = sk_sleep(sk); + if (sctp_writeable(sk)) { + struct socket_wq *wq; - if (wq && waitqueue_active(wq)) - wake_up_interruptible(wq); + rcu_read_lock(); + wq = rcu_dereference(sk->sk_wq); + if (wq) { + if (waitqueue_active(&wq->wait)) + wake_up_interruptible(&wq->wait); /* Note that we try to include the Async I/O support * here by modeling from the current TCP/UDP code. * We have not tested with it yet. */ if (!(sk->sk_shutdown & SEND_SHUTDOWN)) - sock_wake_async(sock, - SOCK_WAKE_SPACE, POLL_OUT); + sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT); } + rcu_read_unlock(); } } @@ -7375,6 +7379,13 @@ struct proto sctp_prot = { #if IS_ENABLED(CONFIG_IPV6) +#include <net/transp_v6.h> +static void sctp_v6_destroy_sock(struct sock *sk) +{ + sctp_destroy_sock(sk); + inet6_destroy_sock(sk); +} + struct proto sctpv6_prot = { .name = "SCTPv6", .owner = THIS_MODULE, @@ -7384,7 +7395,7 @@ struct proto sctpv6_prot = { .accept = sctp_accept, .ioctl = sctp_ioctl, .init = sctp_init_sock, - .destroy = sctp_destroy_sock, + .destroy = sctp_v6_destroy_sock, .shutdown = sctp_shutdown, .setsockopt = sctp_setsockopt, .getsockopt = sctp_getsockopt, diff --git a/net/socket.c b/net/socket.c index dd2c247c99e3..456fadb3d819 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1056,27 +1056,20 @@ static int sock_fasync(int fd, struct file *filp, int on) return 0; } -/* This function may be called only under socket lock or callback_lock or rcu_lock */ +/* This function may be called only under rcu_lock */ -int sock_wake_async(struct socket *sock, int how, int band) +int sock_wake_async(struct socket_wq *wq, int how, int band) { - struct socket_wq *wq; - - if (!sock) - return -1; - rcu_read_lock(); - wq = rcu_dereference(sock->wq); - if (!wq || !wq->fasync_list) { - rcu_read_unlock(); + if (!wq || !wq->fasync_list) return -1; - } + switch (how) { case SOCK_WAKE_WAITD: - if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags)) + if (test_bit(SOCKWQ_ASYNC_WAITDATA, &wq->flags)) break; goto call_kill; case SOCK_WAKE_SPACE: - if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags)) + if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags)) break; /* fall through */ case SOCK_WAKE_IO: @@ -1086,7 +1079,7 @@ call_kill: case SOCK_WAKE_URG: kill_fasync(&wq->fasync_list, 
SIGURG, band); } - rcu_read_unlock(); + return 0; } EXPORT_SYMBOL(sock_wake_async); diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c index 229956bf8457..95f82d8d4888 100644 --- a/net/sunrpc/backchannel_rqst.c +++ b/net/sunrpc/backchannel_rqst.c @@ -353,12 +353,20 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied) { struct rpc_xprt *xprt = req->rq_xprt; struct svc_serv *bc_serv = xprt->bc_serv; + struct xdr_buf *rq_rcv_buf = &req->rq_rcv_buf; spin_lock(&xprt->bc_pa_lock); list_del(&req->rq_bc_pa_list); xprt_dec_alloc_count(xprt, 1); spin_unlock(&xprt->bc_pa_lock); + if (copied <= rq_rcv_buf->head[0].iov_len) { + rq_rcv_buf->head[0].iov_len = copied; + rq_rcv_buf->page_len = 0; + } else { + rq_rcv_buf->page_len = copied - rq_rcv_buf->head[0].iov_len; + } + req->rq_private_buf.len = copied; set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index bc5b7b5032ca..7fccf9675df8 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -1363,6 +1363,7 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req, memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen); memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg)); memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res)); + rqstp->rq_arg.len = req->rq_private_buf.len; /* reset result send buffer "put" position */ resv->iov_len = 0; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 1d1a70498910..2ffaf6a79499 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -398,7 +398,7 @@ static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, if (unlikely(!sock)) return -ENOTSOCK; - clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags); + clear_bit(SOCKWQ_ASYNC_NOSPACE, &sock->flags); if (base != 0) { addr = NULL; addrlen = 0; @@ -442,7 +442,7 @@ static void xs_nospace_callback(struct rpc_task *task) struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt); transport->inet->sk_write_pending--; - clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); + clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags); } /** @@ -467,7 +467,7 @@ static int xs_nospace(struct rpc_task *task) /* Don't race with disconnect */ if (xprt_connected(xprt)) { - if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) { + if (test_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags)) { /* * Notify TCP that we're limited by the application * window size @@ -478,7 +478,7 @@ static int xs_nospace(struct rpc_task *task) xprt_wait_for_buffer_space(task, xs_nospace_callback); } } else { - clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); + clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags); ret = -ENOTCONN; } @@ -626,7 +626,7 @@ process_status: case -EPERM: /* When the server has died, an ICMP port unreachable message * prompts ECONNREFUSED. 
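
The backchannel_rqst.c fix makes the reply's xdr_buf segment lengths describe only the bytes actually copied: if the reply fits in the head iovec, the page length must be zeroed, otherwise the overflow goes to the pages. A self-contained rendering of that arithmetic, with a two-field struct standing in for struct xdr_buf:

#include <stdio.h>
#include <stddef.h>

/* Simplified xdr_buf: a head buffer followed by page data. */
struct xdr_buf_demo {
	size_t head_len;	/* head[0].iov_len */
	size_t page_len;
};

/* Trim the receive buffer so its segments cover exactly `copied` bytes. */
static void trim_to_copied(struct xdr_buf_demo *buf, size_t copied)
{
	if (copied <= buf->head_len) {
		buf->head_len = copied;
		buf->page_len = 0;
	} else {
		buf->page_len = copied - buf->head_len;
	}
}

int main(void)
{
	struct xdr_buf_demo buf = { .head_len = 4096, .page_len = 8192 };

	trim_to_copied(&buf, 1500);	/* reply fits in the head */
	printf("head=%zu pages=%zu\n", buf.head_len, buf.page_len);

	buf.head_len = 4096;
	trim_to_copied(&buf, 6000);	/* spills into the pages */
	printf("head=%zu pages=%zu\n", buf.head_len, buf.page_len);
	return 0;
}
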
*/ - clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); + clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags); } return status; @@ -715,7 +715,7 @@ static int xs_tcp_send_request(struct rpc_task *task) case -EADDRINUSE: case -ENOBUFS: case -EPIPE: - clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); + clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags); } return status; @@ -1618,7 +1618,7 @@ static void xs_write_space(struct sock *sk) if (unlikely(!(xprt = xprt_from_sock(sk)))) return; - if (test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags) == 0) + if (test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sock->flags) == 0) return; xprt_write_space(xprt); diff --git a/net/tipc/link.c b/net/tipc/link.c index 9efbdbde2b08..91aea071ab27 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -191,6 +191,7 @@ void tipc_link_add_bc_peer(struct tipc_link *snd_l, snd_l->ackers++; rcv_l->acked = snd_l->snd_nxt - 1; + snd_l->state = LINK_ESTABLISHED; tipc_link_build_bc_init_msg(uc_l, xmitq); } @@ -206,6 +207,7 @@ void tipc_link_remove_bc_peer(struct tipc_link *snd_l, rcv_l->state = LINK_RESET; if (!snd_l->ackers) { tipc_link_reset(snd_l); + snd_l->state = LINK_RESET; __skb_queue_purge(xmitq); } } diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 552dbaba9cf3..b53246fb0412 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -105,6 +105,7 @@ struct tipc_sock { static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb); static void tipc_data_ready(struct sock *sk); static void tipc_write_space(struct sock *sk); +static void tipc_sock_destruct(struct sock *sk); static int tipc_release(struct socket *sock); static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags); static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p); @@ -381,6 +382,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock, sk->sk_rcvbuf = sysctl_tipc_rmem[1]; sk->sk_data_ready = tipc_data_ready; sk->sk_write_space = tipc_write_space; + sk->sk_destruct = tipc_sock_destruct; tsk->conn_timeout = CONN_TIMEOUT_DEFAULT; tsk->sent_unacked = 0; atomic_set(&tsk->dupl_rcvcnt, 0); @@ -470,9 +472,6 @@ static int tipc_release(struct socket *sock) tipc_node_remove_conn(net, dnode, tsk->portid); } - /* Discard any remaining (connection-based) messages in receive queue */ - __skb_queue_purge(&sk->sk_receive_queue); - /* Reject any messages that accumulated in backlog queue */ sock->state = SS_DISCONNECTING; release_sock(sk); @@ -1515,6 +1514,11 @@ static void tipc_data_ready(struct sock *sk) rcu_read_unlock(); } +static void tipc_sock_destruct(struct sock *sk) +{ + __skb_queue_purge(&sk->sk_receive_queue); +} + /** * filter_connect - Handle all incoming messages for a connection-based socket * @tsk: TIPC socket diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c index ad2719ad4c1b..70c03271b798 100644 --- a/net/tipc/udp_media.c +++ b/net/tipc/udp_media.c @@ -158,8 +158,11 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb, struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value; struct rtable *rt; - if (skb_headroom(skb) < UDP_MIN_HEADROOM) - pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC); + if (skb_headroom(skb) < UDP_MIN_HEADROOM) { + err = pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC); + if (err) + goto tx_error; + } skb_set_inner_protocol(skb, htons(ETH_P_TIPC)); ub = rcu_dereference_rtnl(b->media_ptr); diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 955ec152cb71..45aebd966978 100644 --- a/net/unix/af_unix.c 
+++ b/net/unix/af_unix.c @@ -326,6 +326,118 @@ found: return s; } +/* Support code for asymmetrically connected dgram sockets + * + * If a datagram socket is connected to a socket not itself connected + * to the first socket (eg, /dev/log), clients may only enqueue more + * messages if the present receive queue of the server socket is not + * "too large". This means there's a second writeability condition + * poll and sendmsg need to test. The dgram recv code will do a wake + * up on the peer_wait wait queue of a socket upon reception of a + * datagram which needs to be propagated to sleeping would-be writers + * since these might not have sent anything so far. This can't be + * accomplished via poll_wait because the lifetime of the server + * socket might be less than that of its clients if these break their + * association with it or if the server socket is closed while clients + * are still connected to it and there's no way to inform "a polling + * implementation" that it should let go of a certain wait queue + * + * In order to propagate a wake up, a wait_queue_t of the client + * socket is enqueued on the peer_wait queue of the server socket + * whose wake function does a wake_up on the ordinary client socket + * wait queue. This connection is established whenever a write (or + * poll for write) hit the flow control condition and broken when the + * association to the server socket is dissolved or after a wake up + * was relayed. + */ + +static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags, + void *key) +{ + struct unix_sock *u; + wait_queue_head_t *u_sleep; + + u = container_of(q, struct unix_sock, peer_wake); + + __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait, + q); + u->peer_wake.private = NULL; + + /* relaying can only happen while the wq still exists */ + u_sleep = sk_sleep(&u->sk); + if (u_sleep) + wake_up_interruptible_poll(u_sleep, key); + + return 0; +} + +static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other) +{ + struct unix_sock *u, *u_other; + int rc; + + u = unix_sk(sk); + u_other = unix_sk(other); + rc = 0; + spin_lock(&u_other->peer_wait.lock); + + if (!u->peer_wake.private) { + u->peer_wake.private = other; + __add_wait_queue(&u_other->peer_wait, &u->peer_wake); + + rc = 1; + } + + spin_unlock(&u_other->peer_wait.lock); + return rc; +} + +static void unix_dgram_peer_wake_disconnect(struct sock *sk, + struct sock *other) +{ + struct unix_sock *u, *u_other; + + u = unix_sk(sk); + u_other = unix_sk(other); + spin_lock(&u_other->peer_wait.lock); + + if (u->peer_wake.private == other) { + __remove_wait_queue(&u_other->peer_wait, &u->peer_wake); + u->peer_wake.private = NULL; + } + + spin_unlock(&u_other->peer_wait.lock); +} + +static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk, + struct sock *other) +{ + unix_dgram_peer_wake_disconnect(sk, other); + wake_up_interruptible_poll(sk_sleep(sk), + POLLOUT | + POLLWRNORM | + POLLWRBAND); +} + +/* preconditions: + * - unix_peer(sk) == other + * - association is stable + */ +static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other) +{ + int connected; + + connected = unix_dgram_peer_wake_connect(sk, other); + + if (unix_recvq_full(other)) + return 1; + + if (connected) + unix_dgram_peer_wake_disconnect(sk, other); + + return 0; +} + static int unix_writable(const struct sock *sk) { return sk->sk_state != TCP_LISTEN && @@ -431,6 +543,8 @@ static void unix_release_sock(struct sock *sk, int embrion) skpair->sk_state_change(skpair); 
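
The long comment above describes the mechanism in outline: a would-be writer parks a callback entry on the server socket's peer_wait queue, and the dgram receive path's wakeup is relayed from there to the writer's own wait queue. The following userspace miniature models just that relay, with a hand-rolled singly linked list and offsetof() in place of container_of(); it has none of the locking or lifetime handling of the real unix_dgram_peer_wake_* helpers:

#include <stdio.h>
#include <stddef.h>

struct wait_entry {
	void (*func)(struct wait_entry *);
	void *private;			/* the peer, while enqueued */
	struct wait_entry *next;
};

struct demo_sock {
	const char *name;
	struct wait_entry *peer_wait;	/* would-be writers parked here */
	struct wait_entry peer_wake;	/* our entry on the peer's list */
};

/* Invoked from the peer's wakeup: translate it into a wakeup of the
 * client's own wait queue (here: a printf), then drop the association. */
static void relay_wake(struct wait_entry *q)
{
	struct demo_sock *client = (struct demo_sock *)
		((char *)q - offsetof(struct demo_sock, peer_wake));

	q->private = NULL;	/* one-shot, like the kernel relay */
	printf("%s: relayed wakeup, poll would now report POLLOUT\n",
	       client->name);
}

static void wake_peer_waiters(struct demo_sock *server)
{
	struct wait_entry *q = server->peer_wait;

	server->peer_wait = NULL;	/* entries dequeue themselves when relayed */
	while (q) {
		struct wait_entry *next = q->next;

		q->func(q);
		q = next;
	}
}

int main(void)
{
	struct demo_sock server = { "server", NULL, { 0 } };
	struct demo_sock client = { "client", NULL, { relay_wake, &server, NULL } };

	/* client hit the flow-control limit: park its entry on the server */
	client.peer_wake.next = server.peer_wait;
	server.peer_wait = &client.peer_wake;

	/* server receives a datagram: relay the wakeup to parked writers */
	wake_peer_waiters(&server);
	return 0;
}
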
sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP); } + + unix_dgram_peer_wake_disconnect(sk, skpair); sock_put(skpair); /* It may now die */ unix_peer(sk) = NULL; } @@ -666,6 +780,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern) INIT_LIST_HEAD(&u->link); mutex_init(&u->readlock); /* single task reading lock */ init_waitqueue_head(&u->peer_wait); + init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay); unix_insert_socket(unix_sockets_unbound(sk), sk); out: if (sk == NULL) @@ -1033,6 +1148,8 @@ restart: if (unix_peer(sk)) { struct sock *old_peer = unix_peer(sk); unix_peer(sk) = other; + unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer); + unix_state_double_unlock(sk, other); if (other != old_peer) @@ -1434,6 +1551,14 @@ static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool sen return err; } +static bool unix_passcred_enabled(const struct socket *sock, + const struct sock *other) +{ + return test_bit(SOCK_PASSCRED, &sock->flags) || + !other->sk_socket || + test_bit(SOCK_PASSCRED, &other->sk_socket->flags); +} + /* * Some apps rely on write() giving SCM_CREDENTIALS * We include credentials if source or destination socket @@ -1444,14 +1569,41 @@ static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock, { if (UNIXCB(skb).pid) return; - if (test_bit(SOCK_PASSCRED, &sock->flags) || - !other->sk_socket || - test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) { + if (unix_passcred_enabled(sock, other)) { UNIXCB(skb).pid = get_pid(task_tgid(current)); current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid); } } +static int maybe_init_creds(struct scm_cookie *scm, + struct socket *socket, + const struct sock *other) +{ + int err; + struct msghdr msg = { .msg_controllen = 0 }; + + err = scm_send(socket, &msg, scm, false); + if (err) + return err; + + if (unix_passcred_enabled(socket, other)) { + scm->pid = get_pid(task_tgid(current)); + current_uid_gid(&scm->creds.uid, &scm->creds.gid); + } + return err; +} + +static bool unix_skb_scm_eq(struct sk_buff *skb, + struct scm_cookie *scm) +{ + const struct unix_skb_parms *u = &UNIXCB(skb); + + return u->pid == scm->pid && + uid_eq(u->uid, scm->creds.uid) && + gid_eq(u->gid, scm->creds.gid) && + unix_secdata_eq(scm, skb); +} + /* * Send AF_UNIX data. 
*/ @@ -1472,6 +1624,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, struct scm_cookie scm; int max_level; int data_len = 0; + int sk_locked; wait_for_unix_gc(); err = scm_send(sock, msg, &scm, false); @@ -1550,12 +1703,14 @@ restart: goto out_free; } + sk_locked = 0; unix_state_lock(other); +restart_locked: err = -EPERM; if (!unix_may_send(sk, other)) goto out_unlock; - if (sock_flag(other, SOCK_DEAD)) { + if (unlikely(sock_flag(other, SOCK_DEAD))) { /* * Check with 1003.1g - what should * datagram error @@ -1563,10 +1718,14 @@ restart: unix_state_unlock(other); sock_put(other); + if (!sk_locked) + unix_state_lock(sk); + err = 0; - unix_state_lock(sk); if (unix_peer(sk) == other) { unix_peer(sk) = NULL; + unix_dgram_peer_wake_disconnect_wakeup(sk, other); + unix_state_unlock(sk); unix_dgram_disconnected(sk, other); @@ -1592,21 +1751,38 @@ restart: goto out_unlock; } - if (unix_peer(other) != sk && unix_recvq_full(other)) { - if (!timeo) { - err = -EAGAIN; - goto out_unlock; + if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) { + if (timeo) { + timeo = unix_wait_for_peer(other, timeo); + + err = sock_intr_errno(timeo); + if (signal_pending(current)) + goto out_free; + + goto restart; } - timeo = unix_wait_for_peer(other, timeo); + if (!sk_locked) { + unix_state_unlock(other); + unix_state_double_lock(sk, other); + } - err = sock_intr_errno(timeo); - if (signal_pending(current)) - goto out_free; + if (unix_peer(sk) != other || + unix_dgram_peer_wake_me(sk, other)) { + err = -EAGAIN; + sk_locked = 1; + goto out_unlock; + } - goto restart; + if (!sk_locked) { + sk_locked = 1; + goto restart_locked; + } } + if (unlikely(sk_locked)) + unix_state_unlock(sk); + if (sock_flag(other, SOCK_RCVTSTAMP)) __net_timestamp(skb); maybe_add_creds(skb, sock, other); @@ -1620,6 +1796,8 @@ restart: return len; out_unlock: + if (sk_locked) + unix_state_unlock(sk); unix_state_unlock(other); out_free: kfree_skb(skb); @@ -1741,8 +1919,10 @@ out_err: static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page, int offset, size_t size, int flags) { - int err = 0; - bool send_sigpipe = true; + int err; + bool send_sigpipe = false; + bool init_scm = true; + struct scm_cookie scm; struct sock *other, *sk = socket->sk; struct sk_buff *skb, *newskb = NULL, *tail = NULL; @@ -1760,7 +1940,7 @@ alloc_skb: newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT, &err, 0); if (!newskb) - return err; + goto err; } /* we must acquire readlock as we modify already present @@ -1769,12 +1949,12 @@ alloc_skb: err = mutex_lock_interruptible(&unix_sk(other)->readlock); if (err) { err = flags & MSG_DONTWAIT ? 
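
When the sender loses the race and must hold both socket states at once, it takes them via unix_state_double_lock(), whose point is a fixed global ordering so two sockets sending to each other cannot deadlock. A sketch of that ordering discipline with pthread mutexes, locking by ascending address just as the kernel orders by unix_sock address (comparing unrelated pointers is a practical liberty the kernel also takes):

#include <stdio.h>
#include <pthread.h>

/* Always acquire the two locks in address order, so concurrent
 * callers that pass them in opposite orders still agree. */
static void double_lock(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a == b) {
		pthread_mutex_lock(a);
		return;
	}
	if (a > b) {
		pthread_mutex_t *t = a;
		a = b;
		b = t;
	}
	pthread_mutex_lock(a);
	pthread_mutex_lock(b);
}

int main(void)
{
	pthread_mutex_t s1 = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t s2 = PTHREAD_MUTEX_INITIALIZER;

	double_lock(&s1, &s2);	/* same order as double_lock(&s2, &s1) would use */
	puts("both socket states locked without deadlock risk");
	pthread_mutex_unlock(&s2);
	pthread_mutex_unlock(&s1);
	return 0;
}
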
-EAGAIN : -ERESTARTSYS; - send_sigpipe = false; goto err; } if (sk->sk_shutdown & SEND_SHUTDOWN) { err = -EPIPE; + send_sigpipe = true; goto err_unlock; } @@ -1783,17 +1963,27 @@ alloc_skb: if (sock_flag(other, SOCK_DEAD) || other->sk_shutdown & RCV_SHUTDOWN) { err = -EPIPE; + send_sigpipe = true; goto err_state_unlock; } + if (init_scm) { + err = maybe_init_creds(&scm, socket, other); + if (err) + goto err_state_unlock; + init_scm = false; + } + skb = skb_peek_tail(&other->sk_receive_queue); if (tail && tail == skb) { skb = newskb; - } else if (!skb) { - if (newskb) + } else if (!skb || !unix_skb_scm_eq(skb, &scm)) { + if (newskb) { skb = newskb; - else + } else { + tail = skb; goto alloc_skb; + } } else if (newskb) { /* this is fast path, we don't necessarily need to * call to kfree_skb even though with newskb == NULL @@ -1814,6 +2004,9 @@ alloc_skb: atomic_add(size, &sk->sk_wmem_alloc); if (newskb) { + err = unix_scm_to_skb(&scm, skb, false); + if (err) + goto err_state_unlock; spin_lock(&other->sk_receive_queue.lock); __skb_queue_tail(&other->sk_receive_queue, newskb); spin_unlock(&other->sk_receive_queue.lock); @@ -1823,7 +2016,7 @@ alloc_skb: mutex_unlock(&unix_sk(other)->readlock); other->sk_data_ready(other); - + scm_destroy(&scm); return size; err_state_unlock: @@ -1834,6 +2027,8 @@ err: kfree_skb(newskb); if (send_sigpipe && !(flags & MSG_NOSIGNAL)) send_sig(SIGPIPE, current, 0); + if (!init_scm) + scm_destroy(&scm); return err; } @@ -1996,7 +2191,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo, !timeo) break; - set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); + sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); unix_state_unlock(sk); timeo = freezable_schedule_timeout(timeo); unix_state_lock(sk); @@ -2004,7 +2199,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo, if (sock_flag(sk, SOCK_DEAD)) break; - clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); + sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); } finish_wait(sk_sleep(sk), &wait); @@ -2137,10 +2332,7 @@ unlock: if (check_creds) { /* Never glue messages from different writers */ - if ((UNIXCB(skb).pid != scm.pid) || - !uid_eq(UNIXCB(skb).uid, scm.creds.uid) || - !gid_eq(UNIXCB(skb).gid, scm.creds.gid) || - !unix_secdata_eq(&scm, skb)) + if (!unix_skb_scm_eq(skb, &scm)) break; } else if (test_bit(SOCK_PASSCRED, &sock->flags)) { /* Copy credentials */ @@ -2476,20 +2668,22 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock, return mask; writable = unix_writable(sk); - other = unix_peer_get(sk); - if (other) { - if (unix_peer(other) != sk) { - sock_poll_wait(file, &unix_sk(other)->peer_wait, wait); - if (unix_recvq_full(other)) - writable = 0; - } - sock_put(other); + if (writable) { + unix_state_lock(sk); + + other = unix_peer(sk); + if (other && unix_peer(other) != sk && + unix_recvq_full(other) && + unix_dgram_peer_wake_me(sk, other)) + writable = 0; + + unix_state_unlock(sk); } if (writable) mask |= POLLOUT | POLLWRNORM | POLLWRBAND; else - set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); return mask; } diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c index 927db9f35ad6..696ccfa08d10 100644 --- a/security/keys/encrypted-keys/encrypted.c +++ b/security/keys/encrypted-keys/encrypted.c @@ -845,6 +845,8 @@ static int encrypted_update(struct key *key, struct key_preparsed_payload *prep) size_t datalen = prep->datalen; int ret = 0; + if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) + return 
-ENOKEY; if (datalen <= 0 || datalen > 32767 || !prep->data) return -EINVAL; diff --git a/security/keys/trusted.c b/security/keys/trusted.c index 903dace648a1..16dec53184b6 100644 --- a/security/keys/trusted.c +++ b/security/keys/trusted.c @@ -1007,13 +1007,16 @@ static void trusted_rcu_free(struct rcu_head *rcu) */ static int trusted_update(struct key *key, struct key_preparsed_payload *prep) { - struct trusted_key_payload *p = key->payload.data[0]; + struct trusted_key_payload *p; struct trusted_key_payload *new_p; struct trusted_key_options *new_o; size_t datalen = prep->datalen; char *datablob; int ret = 0; + if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) + return -ENOKEY; + p = key->payload.data[0]; if (!p->migratable) return -EPERM; if (datalen <= 0 || datalen > 32767 || !prep->data) diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c index 28cb30f80256..8705d79b2c6f 100644 --- a/security/keys/user_defined.c +++ b/security/keys/user_defined.c @@ -120,7 +120,10 @@ int user_update(struct key *key, struct key_preparsed_payload *prep) if (ret == 0) { /* attach the new data, displacing the old */ - zap = key->payload.data[0]; + if (!test_bit(KEY_FLAG_NEGATIVE, &key->flags)) + zap = key->payload.data[0]; + else + zap = NULL; rcu_assign_keypointer(key, upayload); key->expiry = 0; } diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c index 18643bf9894d..456e1a9bcfde 100644 --- a/security/selinux/ss/conditional.c +++ b/security/selinux/ss/conditional.c @@ -638,7 +638,7 @@ void cond_compute_av(struct avtab *ctab, struct avtab_key *key, { struct avtab_node *node; - if (!ctab || !key || !avd || !xperms) + if (!ctab || !key || !avd) return; for (node = avtab_search_node(ctab, key); node; @@ -657,7 +657,7 @@ void cond_compute_av(struct avtab *ctab, struct avtab_key *key, if ((u16)(AVTAB_AUDITALLOW|AVTAB_ENABLED) == (node->key.specified & (AVTAB_AUDITALLOW|AVTAB_ENABLED))) avd->auditallow |= node->datum.u.data; - if ((node->key.specified & AVTAB_ENABLED) && + if (xperms && (node->key.specified & AVTAB_ENABLED) && (node->key.specified & AVTAB_XPERMS)) services_compute_xperms_drivers(xperms, node); } diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c index 8fef1b8d1fd8..c50177fb469f 100644 --- a/sound/hda/hdac_i915.c +++ b/sound/hda/hdac_i915.c @@ -118,6 +118,72 @@ int snd_hdac_get_display_clk(struct hdac_bus *bus) } EXPORT_SYMBOL_GPL(snd_hdac_get_display_clk); +/* There is a fixed mapping between audio pin node and display port + * on current Intel platforms: + * Pin Widget 5 - PORT B (port = 1 in i915 driver) + * Pin Widget 6 - PORT C (port = 2 in i915 driver) + * Pin Widget 7 - PORT D (port = 3 in i915 driver) + */ +static int pin2port(hda_nid_t pin_nid) +{ + return pin_nid - 4; +} + +/** + * snd_hdac_sync_audio_rate - Set N/CTS based on the sample rate + * @bus: HDA core bus + * @nid: the pin widget NID + * @rate: the sample rate to set + * + * This function is supposed to be used only by a HD-audio controller + * driver that needs the interaction with i915 graphics. + * + * This function sets N/CTS value based on the given sample rate. + * Returns zero for success, or a negative error code. 
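
The pin2port() helper moved into hdac_i915.c encodes the fixed relationship spelled out in its comment: pin widgets 5, 6 and 7 correspond to i915 ports 1, 2 and 3, hence the bare "nid - 4". A trivial standalone version with an added range check (the kernel helper assumes it is only called for valid Intel HDMI/DP pins; the check and the _demo suffix are additions here):

#include <stdio.h>

/* Pin widgets 5/6/7 map to i915 ports 1/2/3 (B/C/D) on these platforms. */
static int pin2port_demo(int pin_nid)
{
	if (pin_nid < 5 || pin_nid > 7)
		return -1;	/* not an Intel HDMI/DP pin */
	return pin_nid - 4;
}

int main(void)
{
	for (int nid = 4; nid <= 8; nid++)
		printf("pin widget %d -> port %d\n", nid, pin2port_demo(nid));
	return 0;
}
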
+ */ +int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate) +{ + struct i915_audio_component *acomp = bus->audio_component; + + if (!acomp || !acomp->ops || !acomp->ops->sync_audio_rate) + return -ENODEV; + return acomp->ops->sync_audio_rate(acomp->dev, pin2port(nid), rate); +} +EXPORT_SYMBOL_GPL(snd_hdac_sync_audio_rate); + +/** + * snd_hdac_acomp_get_eld - Get the audio state and ELD via component + * @bus: HDA core bus + * @nid: the pin widget NID + * @audio_enabled: the pointer to store the current audio state + * @buffer: the buffer pointer to store ELD bytes + * @max_bytes: the max bytes to be stored on @buffer + * + * This function is supposed to be used only by a HD-audio controller + * driver that needs the interaction with i915 graphics. + * + * This function queries the current state of the audio on the given + * digital port and fetches the ELD bytes onto the given buffer. + * It returns the number of bytes for the total ELD data, zero for + * invalid ELD, or a negative error code. + * + * The return size is the total bytes required for the whole ELD bytes, + * thus it may be over @max_bytes. If it's over @max_bytes, it implies + * that only a part of ELD bytes have been fetched. + */ +int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid, + bool *audio_enabled, char *buffer, int max_bytes) +{ + struct i915_audio_component *acomp = bus->audio_component; + + if (!acomp || !acomp->ops || !acomp->ops->get_eld) + return -ENODEV; + + return acomp->ops->get_eld(acomp->dev, pin2port(nid), audio_enabled, + buffer, max_bytes); +} +EXPORT_SYMBOL_GPL(snd_hdac_acomp_get_eld); + static int hdac_component_master_bind(struct device *dev) { struct i915_audio_component *acomp = hdac_acomp; diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 2a7d29a07f31..cd9b0ffc91dc 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -83,6 +83,7 @@ struct hdmi_spec_per_pin { struct mutex lock; struct delayed_work work; struct snd_kcontrol *eld_ctl; + struct snd_jack *acomp_jack; /* jack via audio component */ int repoll_count; bool setup; /* the stream has been set up by prepare callback */ int channels; /* current number of channels */ @@ -1587,7 +1588,9 @@ static void update_eld(struct hda_codec *codec, &per_pin->eld_ctl->id); } -static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll) +/* update ELD and jack state via HD-audio verbs */ +static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin, + int repoll) { struct hda_jack_tbl *jack; struct hda_codec *codec = per_pin->codec; @@ -1650,6 +1653,54 @@ static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll) return ret; } +/* update ELD and jack state via audio component */ +static void sync_eld_via_acomp(struct hda_codec *codec, + struct hdmi_spec_per_pin *per_pin) +{ + struct hdmi_spec *spec = codec->spec; + struct hdmi_eld *eld = &spec->temp_eld; + int size; + + mutex_lock(&per_pin->lock); + size = snd_hdac_acomp_get_eld(&codec->bus->core, per_pin->pin_nid, + &eld->monitor_present, eld->eld_buffer, + ELD_MAX_SIZE); + if (size < 0) + goto unlock; + if (size > 0) { + size = min(size, ELD_MAX_SIZE); + if (snd_hdmi_parse_eld(codec, &eld->info, + eld->eld_buffer, size) < 0) + size = -EINVAL; + } + + if (size > 0) { + eld->eld_valid = true; + eld->eld_size = size; + } else { + eld->eld_valid = false; + eld->eld_size = 0; + } + + update_eld(codec, per_pin, eld); + snd_jack_report(per_pin->acomp_jack, + eld->monitor_present ? 
SND_JACK_AVOUT : 0); + unlock: + mutex_unlock(&per_pin->lock); +} + +static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll) +{ + struct hda_codec *codec = per_pin->codec; + + if (codec_has_acomp(codec)) { + sync_eld_via_acomp(codec, per_pin); + return false; /* don't call snd_hda_jack_report_sync() */ + } else { + return hdmi_present_sense_via_verbs(per_pin, repoll); + } +} + static void hdmi_repoll_eld(struct work_struct *work) { struct hdmi_spec_per_pin *per_pin = @@ -1785,17 +1836,6 @@ static bool check_non_pcm_per_cvt(struct hda_codec *codec, hda_nid_t cvt_nid) return non_pcm; } -/* There is a fixed mapping between audio pin node and display port - * on current Intel platforms: - * Pin Widget 5 - PORT B (port = 1 in i915 driver) - * Pin Widget 6 - PORT C (port = 2 in i915 driver) - * Pin Widget 7 - PORT D (port = 3 in i915 driver) - */ -static int intel_pin2port(hda_nid_t pin_nid) -{ - return pin_nid - 4; -} - /* * HDMI callbacks */ @@ -1812,7 +1852,6 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo, struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx); hda_nid_t pin_nid = per_pin->pin_nid; struct snd_pcm_runtime *runtime = substream->runtime; - struct i915_audio_component *acomp = codec->bus->core.audio_component; bool non_pcm; int pinctl; @@ -1831,10 +1870,7 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo, /* Call sync_audio_rate to set the N/CTS/M manually if necessary */ /* Todo: add DP1.2 MST audio support later */ - if (acomp && acomp->ops && acomp->ops->sync_audio_rate) - acomp->ops->sync_audio_rate(acomp->dev, - intel_pin2port(pin_nid), - runtime->rate); + snd_hdac_sync_audio_rate(&codec->bus->core, pin_nid, runtime->rate); non_pcm = check_non_pcm_per_cvt(codec, cvt_nid); mutex_lock(&per_pin->lock); @@ -2100,6 +2136,30 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec) return 0; } +static void free_acomp_jack_priv(struct snd_jack *jack) +{ + struct hdmi_spec_per_pin *per_pin = jack->private_data; + + per_pin->acomp_jack = NULL; +} + +static int add_acomp_jack_kctl(struct hda_codec *codec, + struct hdmi_spec_per_pin *per_pin, + const char *name) +{ + struct snd_jack *jack; + int err; + + err = snd_jack_new(codec->card, name, SND_JACK_AVOUT, &jack, + true, false); + if (err < 0) + return err; + per_pin->acomp_jack = jack; + jack->private_data = per_pin; + jack->private_free = free_acomp_jack_priv; + return 0; +} + static int generic_hdmi_build_jack(struct hda_codec *codec, int pin_idx) { char hdmi_str[32] = "HDMI/DP"; @@ -2110,6 +2170,8 @@ static int generic_hdmi_build_jack(struct hda_codec *codec, int pin_idx) if (pcmdev > 0) sprintf(hdmi_str + strlen(hdmi_str), ",pcm=%d", pcmdev); + if (codec_has_acomp(codec)) + return add_acomp_jack_kctl(codec, per_pin, hdmi_str); phantom_jack = !is_jack_detectable(codec, per_pin->pin_nid); if (phantom_jack) strncat(hdmi_str, " Phantom", @@ -2205,8 +2267,10 @@ static int generic_hdmi_init(struct hda_codec *codec) hda_nid_t pin_nid = per_pin->pin_nid; hdmi_init_pin(codec, pin_nid); - snd_hda_jack_detect_enable_callback(codec, pin_nid, - codec->jackpoll_interval > 0 ? jack_callback : NULL); + if (!codec_has_acomp(codec)) + snd_hda_jack_detect_enable_callback(codec, pin_nid, + codec->jackpoll_interval > 0 ? 
+ jack_callback : NULL); } return 0; } @@ -2236,6 +2300,8 @@ static void generic_hdmi_free(struct hda_codec *codec) cancel_delayed_work_sync(&per_pin->work); eld_proc_free(per_pin); + if (per_pin->acomp_jack) + snd_device_free(codec->card, per_pin->acomp_jack); } if (spec->i915_bound) diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c index 40ab4476c80a..51cf8256c6cd 100644 --- a/tools/testing/nvdimm/test/nfit.c +++ b/tools/testing/nvdimm/test/nfit.c @@ -420,8 +420,7 @@ static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr) static int nfit_test0_alloc(struct nfit_test *t) { - size_t nfit_size = sizeof(struct acpi_table_nfit) - + sizeof(struct acpi_nfit_system_address) * NUM_SPA + size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA + sizeof(struct acpi_nfit_memory_map) * NUM_MEM + sizeof(struct acpi_nfit_control_region) * NUM_DCR + sizeof(struct acpi_nfit_data_region) * NUM_BDW @@ -471,8 +470,7 @@ static int nfit_test0_alloc(struct nfit_test *t) static int nfit_test1_alloc(struct nfit_test *t) { - size_t nfit_size = sizeof(struct acpi_table_nfit) - + sizeof(struct acpi_nfit_system_address) + size_t nfit_size = sizeof(struct acpi_nfit_system_address) + sizeof(struct acpi_nfit_memory_map) + sizeof(struct acpi_nfit_control_region); @@ -488,39 +486,24 @@ static int nfit_test1_alloc(struct nfit_test *t) return 0; } -static void nfit_test_init_header(struct acpi_table_nfit *nfit, size_t size) -{ - memcpy(nfit->header.signature, ACPI_SIG_NFIT, 4); - nfit->header.length = size; - nfit->header.revision = 1; - memcpy(nfit->header.oem_id, "LIBND", 6); - memcpy(nfit->header.oem_table_id, "TEST", 5); - nfit->header.oem_revision = 1; - memcpy(nfit->header.asl_compiler_id, "TST", 4); - nfit->header.asl_compiler_revision = 1; -} - static void nfit_test0_setup(struct nfit_test *t) { struct nvdimm_bus_descriptor *nd_desc; struct acpi_nfit_desc *acpi_desc; struct acpi_nfit_memory_map *memdev; void *nfit_buf = t->nfit_buf; - size_t size = t->nfit_size; struct acpi_nfit_system_address *spa; struct acpi_nfit_control_region *dcr; struct acpi_nfit_data_region *bdw; struct acpi_nfit_flush_address *flush; unsigned int offset; - nfit_test_init_header(nfit_buf, size); - /* * spa0 (interleave first half of dimm0 and dimm1, note storage * does not actually alias the related block-data-window * regions) */ - spa = nfit_buf + sizeof(struct acpi_table_nfit); + spa = nfit_buf; spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; spa->header.length = sizeof(*spa); memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16); @@ -533,7 +516,7 @@ static void nfit_test0_setup(struct nfit_test *t) * does not actually alias the related block-data-window * regions) */ - spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa); + spa = nfit_buf + sizeof(*spa); spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; spa->header.length = sizeof(*spa); memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16); @@ -542,7 +525,7 @@ static void nfit_test0_setup(struct nfit_test *t) spa->length = SPA1_SIZE; /* spa2 (dcr0) dimm0 */ - spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 2; + spa = nfit_buf + sizeof(*spa) * 2; spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; spa->header.length = sizeof(*spa); memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); @@ -551,7 +534,7 @@ static void nfit_test0_setup(struct nfit_test *t) spa->length = DCR_SIZE; /* spa3 (dcr1) dimm1 */ - spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 3; + spa = nfit_buf + 
sizeof(*spa) * 3; spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; spa->header.length = sizeof(*spa); memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); @@ -560,7 +543,7 @@ static void nfit_test0_setup(struct nfit_test *t) spa->length = DCR_SIZE; /* spa4 (dcr2) dimm2 */ - spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 4; + spa = nfit_buf + sizeof(*spa) * 4; spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; spa->header.length = sizeof(*spa); memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); @@ -569,7 +552,7 @@ static void nfit_test0_setup(struct nfit_test *t) spa->length = DCR_SIZE; /* spa5 (dcr3) dimm3 */ - spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 5; + spa = nfit_buf + sizeof(*spa) * 5; spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; spa->header.length = sizeof(*spa); memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); @@ -578,7 +561,7 @@ static void nfit_test0_setup(struct nfit_test *t) spa->length = DCR_SIZE; /* spa6 (bdw for dcr0) dimm0 */ - spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 6; + spa = nfit_buf + sizeof(*spa) * 6; spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; spa->header.length = sizeof(*spa); memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); @@ -587,7 +570,7 @@ static void nfit_test0_setup(struct nfit_test *t) spa->length = DIMM_SIZE; /* spa7 (bdw for dcr1) dimm1 */ - spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 7; + spa = nfit_buf + sizeof(*spa) * 7; spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; spa->header.length = sizeof(*spa); memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); @@ -596,7 +579,7 @@ static void nfit_test0_setup(struct nfit_test *t) spa->length = DIMM_SIZE; /* spa8 (bdw for dcr2) dimm2 */ - spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 8; + spa = nfit_buf + sizeof(*spa) * 8; spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; spa->header.length = sizeof(*spa); memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); @@ -605,7 +588,7 @@ static void nfit_test0_setup(struct nfit_test *t) spa->length = DIMM_SIZE; /* spa9 (bdw for dcr3) dimm3 */ - spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 9; + spa = nfit_buf + sizeof(*spa) * 9; spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; spa->header.length = sizeof(*spa); memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); @@ -613,7 +596,7 @@ static void nfit_test0_setup(struct nfit_test *t) spa->address = t->dimm_dma[3]; spa->length = DIMM_SIZE; - offset = sizeof(struct acpi_table_nfit) + sizeof(*spa) * 10; + offset = sizeof(*spa) * 10; /* mem-region0 (spa0, dimm0) */ memdev = nfit_buf + offset; memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; @@ -1100,15 +1083,13 @@ static void nfit_test0_setup(struct nfit_test *t) static void nfit_test1_setup(struct nfit_test *t) { - size_t size = t->nfit_size, offset; + size_t offset; void *nfit_buf = t->nfit_buf; struct acpi_nfit_memory_map *memdev; struct acpi_nfit_control_region *dcr; struct acpi_nfit_system_address *spa; - nfit_test_init_header(nfit_buf, size); - - offset = sizeof(struct acpi_table_nfit); + offset = 0; /* spa0 (flat range with no bdw aliasing) */ spa = nfit_buf + offset; spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; diff --git a/tools/testing/selftests/futex/README b/tools/testing/selftests/futex/README index 3224a049b196..0558bb9ce0a6 100644 --- a/tools/testing/selftests/futex/README +++ b/tools/testing/selftests/futex/README @@ -27,7 +27,7 @@ o The build system shall remain as simple as 
possible, avoiding any archive or o Where possible, any helper functions or other package-wide code shall be implemented in header files, avoiding the need to compile intermediate object files. -o External dependendencies shall remain as minimal as possible. Currently gcc +o External dependencies shall remain as minimal as possible. Currently gcc and glibc are the only dependencies. o Tests return 0 for success and < 0 for failure. diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index e38cc54942db..882fe83a3554 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c @@ -492,6 +492,9 @@ TEST_SIGNAL(KILL_one_arg_six, SIGSYS) pid_t parent = getppid(); int fd; void *map1, *map2; + int page_size = sysconf(_SC_PAGESIZE); + + ASSERT_LT(0, page_size); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); @@ -504,16 +507,16 @@ TEST_SIGNAL(KILL_one_arg_six, SIGSYS) EXPECT_EQ(parent, syscall(__NR_getppid)); map1 = (void *)syscall(sysno, - NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, fd, PAGE_SIZE); + NULL, page_size, PROT_READ, MAP_PRIVATE, fd, page_size); EXPECT_NE(MAP_FAILED, map1); /* mmap2() should never return. */ map2 = (void *)syscall(sysno, - NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, fd, 0x0C0FFEE); + NULL, page_size, PROT_READ, MAP_PRIVATE, fd, 0x0C0FFEE); EXPECT_EQ(MAP_FAILED, map2); /* The test failed, so clean up the resources. */ - munmap(map1, PAGE_SIZE); - munmap(map2, PAGE_SIZE); + munmap(map1, page_size); + munmap(map2, page_size); close(fd); } diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 21a0ab2d8919..69bca185c471 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -221,17 +221,23 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) kvm_timer_update_state(vcpu); /* - * If we enter the guest with the virtual input level to the VGIC - * asserted, then we have already told the VGIC what we need to, and - * we don't need to exit from the guest until the guest deactivates - * the already injected interrupt, so therefore we should set the - * hardware active state to prevent unnecessary exits from the guest. - * - * Conversely, if the virtual input level is deasserted, then always - * clear the hardware active state to ensure that hardware interrupts - * from the timer triggers a guest exit. - */ - if (timer->irq.level) + * If we enter the guest with the virtual input level to the VGIC + * asserted, then we have already told the VGIC what we need to, and + * we don't need to exit from the guest until the guest deactivates + * the already injected interrupt, so therefore we should set the + * hardware active state to prevent unnecessary exits from the guest. + * + * Also, if we enter the guest with the virtual timer interrupt active, + * then it must be active on the physical distributor, because we set + * the HW bit and the guest must be able to deactivate the virtual and + * physical interrupt at the same time. + * + * Conversely, if the virtual input level is deasserted and the virtual + * interrupt is not active, then always clear the hardware active state + * to ensure that hardware interrupts from the timer triggers a guest + * exit. 
+ */ + if (timer->irq.level || kvm_vgic_map_is_active(vcpu, timer->map)) phys_active = true; else phys_active = false; diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 533538385d5d..65461f821a75 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c @@ -1096,6 +1096,27 @@ static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu) vgic_set_lr(vcpu, lr_nr, vlr); } +static bool dist_active_irq(struct kvm_vcpu *vcpu) +{ + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + + return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu); +} + +bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map) +{ + int i; + + for (i = 0; i < vcpu->arch.vgic_cpu.nr_lr; i++) { + struct vgic_lr vlr = vgic_get_lr(vcpu, i); + + if (vlr.irq == map->virt_irq && vlr.state & LR_STATE_ACTIVE) + return true; + } + + return dist_active_irq(vcpu); +} + /* * An interrupt may have been disabled after being made pending on the * CPU interface (the classic case is a timer running while we're @@ -1248,7 +1269,7 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) * may have been serviced from another vcpu. In all cases, * move along. */ - if (!kvm_vgic_vcpu_pending_irq(vcpu) && !kvm_vgic_vcpu_active_irq(vcpu)) + if (!kvm_vgic_vcpu_pending_irq(vcpu) && !dist_active_irq(vcpu)) goto epilog; /* SGIs */ @@ -1396,25 +1417,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr) { struct vgic_dist *dist = &vcpu->kvm->arch.vgic; - struct irq_phys_map *map; - bool phys_active; bool level_pending; - int ret; if (!(vlr.state & LR_HW)) return false; - map = vgic_irq_map_search(vcpu, vlr.irq); - BUG_ON(!map); - - ret = irq_get_irqchip_state(map->irq, - IRQCHIP_STATE_ACTIVE, - &phys_active); - - WARN_ON(ret); - - if (phys_active) - return 0; + if (vlr.state & LR_STATE_ACTIVE) + return false; spin_lock(&dist->lock); level_pending = process_queued_irq(vcpu, lr, vlr); @@ -1479,17 +1488,6 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu); } -int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu) -{ - struct vgic_dist *dist = &vcpu->kvm->arch.vgic; - - if (!irqchip_in_kernel(vcpu->kvm)) - return 0; - - return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu); -} - - void vgic_kick_vcpus(struct kvm *kvm) { struct kvm_vcpu *vcpu; |